code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 321
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 321
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=6_4 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1_0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=[1, 1_6, 4, 4] , __UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase__ :int = parent
lowerCAmelCase__ :str = batch_size
lowerCAmelCase__ :str = image_size
lowerCAmelCase__ :Optional[int] = patch_size
lowerCAmelCase__ :Optional[Any] = num_channels
lowerCAmelCase__ :Tuple = is_training
lowerCAmelCase__ :List[str] = use_labels
lowerCAmelCase__ :str = hidden_size
lowerCAmelCase__ :Optional[Any] = num_hidden_layers
lowerCAmelCase__ :Optional[Any] = num_attention_heads
lowerCAmelCase__ :Tuple = intermediate_size
lowerCAmelCase__ :List[Any] = hidden_act
lowerCAmelCase__ :Tuple = hidden_dropout_prob
lowerCAmelCase__ :List[Any] = attention_probs_dropout_prob
lowerCAmelCase__ :Dict = type_sequence_label_size
lowerCAmelCase__ :Dict = initializer_range
lowerCAmelCase__ :str = scope
lowerCAmelCase__ :str = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCAmelCase__ :Dict = (self.image_size // 3_2) ** 2
lowerCAmelCase__ :List[Any] = num_patches + 1
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ :List[Any] = None
if self.use_labels:
lowerCAmelCase__ :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ :int = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 1_6, 3_2],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__UpperCAmelCase , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = ViTHybridModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Union[str, Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.type_sequence_label_size
lowerCAmelCase__ :Union[str, Any] = ViTHybridForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Union[str, Any] = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[str] = config_and_inputs
lowerCAmelCase__ :Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__magic_name__ :Optional[Any] = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ :int = False
__magic_name__ :Dict = False
__magic_name__ :List[Any] = False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = ViTHybridModelTester(self )
lowerCAmelCase__ :Any = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :int = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ :Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :str = model_class(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ :int = [*signature.parameters.keys()]
lowerCAmelCase__ :Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ :Optional[Any] = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase__ :Any = model_class(config=__UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCAmelCase__ :int = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def snake_case ( self ):
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ :Dict = ViTHybridModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __A () ->int:
"""simple docstring"""
lowerCAmelCase__ :Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = self.default_image_processor
lowerCAmelCase__ :Tuple = prepare_img()
lowerCAmelCase__ :Optional[Any] = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ :List[str] = model(**__UpperCAmelCase )
# verify the logits
lowerCAmelCase__ :Tuple = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
lowerCAmelCase__ :Dict = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
lowerCAmelCase__ :List[Any] = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
lowerCAmelCase__ :List[str] = prepare_img()
lowerCAmelCase__ :Union[str, Any] = image_processor(images=__UpperCAmelCase , return_tensors='pt' )
lowerCAmelCase__ :Optional[Any] = model(**__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase__ :str = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 254
|
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
lowerCAmelCase__ :int = credit_card_number
lowerCAmelCase__ :Tuple = 0
lowerCAmelCase__ :int = len(_SCREAMING_SNAKE_CASE ) - 2
for i in range(_SCREAMING_SNAKE_CASE , -1 , -2 ):
# double the value of every second digit
lowerCAmelCase__ :Optional[Any] = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
lowerCAmelCase__ :str = cc_number[:i] + str(_SCREAMING_SNAKE_CASE ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = F"{credit_card_number} is an invalid credit card number because"
if not credit_card_number.isdigit():
print(F"{error_message} it has nonnumerical characters." )
return False
if not 13 <= len(_SCREAMING_SNAKE_CASE ) <= 16:
print(F"{error_message} of its length." )
return False
if not validate_initial_digits(_SCREAMING_SNAKE_CASE ):
print(F"{error_message} of its first two digits." )
return False
if not luhn_validation(_SCREAMING_SNAKE_CASE ):
print(F"{error_message} it fails the Luhn check." )
return False
print(F"{credit_card_number} is a valid credit card number." )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 254
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : List[Any] = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase : Union[str, Any] = {
"yjernite/retribert-base-uncased": 5_1_2,
}
_lowerCAmelCase : Tuple = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _UpperCamelCase ( _lowercase ):
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ = RetriBertTokenizer
UpperCAmelCase_ = ["input_ids", "attention_mask"]
def __init__( self :List[str] , lowerCamelCase :Tuple=None , lowerCamelCase :Union[str, Any]=None , lowerCamelCase :Any=True , lowerCamelCase :int="[UNK]" , lowerCamelCase :List[str]="[SEP]" , lowerCamelCase :str="[PAD]" , lowerCamelCase :Union[str, Any]="[CLS]" , lowerCamelCase :List[Any]="[MASK]" , lowerCamelCase :Union[str, Any]=True , lowerCamelCase :List[Any]=None , **lowerCamelCase :Any , ) -> Tuple:
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , )
UpperCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __lowerCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __lowerCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __lowerCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase__ = getattr(__lowerCamelCase , normalizer_state.pop("type" ) )
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = strip_accents
UpperCAmelCase__ = tokenize_chinese_chars
UpperCAmelCase__ = normalizer_class(**__lowerCamelCase )
UpperCAmelCase__ = do_lower_case
def UpperCAmelCase_ ( self :Union[str, Any] , lowerCamelCase :Tuple , lowerCamelCase :List[Any]=None ) -> str:
UpperCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self :int , lowerCamelCase :List[int] , lowerCamelCase :Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self :List[Any] , lowerCamelCase :str , lowerCamelCase :Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase__ = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
| 169
|
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : list[list[int]] = [[0 for _ in range(snake_case__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__UpperCAmelCase : Optional[int] = 1
for n in range(m + 1 ):
for k in range(1, snake_case__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_snake_case = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
_snake_case = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 157
| 0
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowerCAmelCase :List[Any] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowerCAmelCase :List[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowerCAmelCase :int = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
def __lowerCAmelCase ( self : Optional[int] , _A : Tuple , _A : Dict , _A : Any=False ) -> Any:
if return_pvalue:
__magic_name__ : int = pearsonr(lowercase_ , lowercase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase_ , lowercase_ )[0] )}
| 356
|
'''simple docstring'''
lowerCAmelCase :Union[str, Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase :Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase :Tuple = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 275
| 0
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _SCREAMING_SNAKE_CASE ( __snake_case : str , __snake_case : Tuple , __snake_case : int ):
'''simple docstring'''
if isinstance(__snake_case , torch.Tensor ):
return image
elif isinstance(__snake_case , PIL.Image.Image ):
lowercase = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowercase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
lowercase = np.concatenate(__snake_case , axis=0 )
lowercase = np.array(__snake_case ).astype(np.floataa ) / 255.0
lowercase = image.transpose(0 , 3 , 1 , 2 )
lowercase = 2.0 * image - 1.0
lowercase = torch.from_numpy(__snake_case )
elif isinstance(image[0] , torch.Tensor ):
lowercase = torch.cat(__snake_case , dim=0 )
return image
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Any=0.9995 ):
'''simple docstring'''
if not isinstance(__snake_case , np.ndarray ):
lowercase = True
lowercase = va.device
lowercase = va.cpu().numpy()
lowercase = va.cpu().numpy()
lowercase = np.sum(va * va / (np.linalg.norm(__snake_case ) * np.linalg.norm(__snake_case )) )
if np.abs(__snake_case ) > DOT_THRESHOLD:
lowercase = (1 - t) * va + t * va
else:
lowercase = np.arccos(__snake_case )
lowercase = np.sin(__snake_case )
lowercase = theta_a * t
lowercase = np.sin(__snake_case )
lowercase = np.sin(theta_a - theta_t ) / sin_theta_a
lowercase = sin_theta_t / sin_theta_a
lowercase = sa * va + sa * va
if inputs_are_torch:
lowercase = torch.from_numpy(__snake_case ).to(__snake_case )
return va
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : List[Any] ):
'''simple docstring'''
lowercase = F.normalize(__snake_case , dim=-1 )
lowercase = F.normalize(__snake_case , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
for param in model.parameters():
lowercase = value
class a ( a_ ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ):
super().__init__()
self.register_modules(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , clip_model=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , feature_extractor=_lowerCamelCase , coca_model=_lowerCamelCase , coca_tokenizer=_lowerCamelCase , coca_transform=_lowerCamelCase , )
lowercase = (
feature_extractor.size
if isinstance(feature_extractor.size , _lowerCamelCase )
else feature_extractor.size['shortest_edge']
)
lowercase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _lowerCamelCase )
set_requires_grad(self.clip_model , _lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def UpperCamelCase_ ( self ):
self.enable_attention_slicing(_lowerCamelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.vae , _lowerCamelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.vae , _lowerCamelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.unet , _lowerCamelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.unet , _lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
# get the original timestep using init_timestep
lowercase = min(int(num_inference_steps * strength ) , _lowerCamelCase )
lowercase = max(num_inference_steps - init_timestep , 0 )
lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(_lowerCamelCase )}' )
lowercase = image.to(device=_lowerCamelCase , dtype=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowercase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowerCamelCase )
]
lowercase = torch.cat(_lowerCamelCase , dim=0 )
else:
lowercase = self.vae.encode(_lowerCamelCase ).latent_dist.sample(_lowerCamelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase = 0.1_8_2_1_5 * init_latents
lowercase = init_latents.repeat_interleave(_lowerCamelCase , dim=0 )
lowercase = randn_tensor(init_latents.shape , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase )
# get latents
lowercase = self.scheduler.add_noise(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase = init_latents
return latents
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = self.coca_transform(_lowerCamelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowercase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
lowercase = self.feature_extractor.preprocess(_lowerCamelCase )
lowercase = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
lowercase = self.clip_model.get_image_features(_lowerCamelCase )
lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCamelCase )
lowercase = image_embeddings_clip.repeat_interleave(_lowerCamelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowercase = latents.detach().requires_grad_()
lowercase = self.scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
# predict the noise residual
lowercase = self.unet(_lowerCamelCase , _lowerCamelCase , encoder_hidden_states=_lowerCamelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowercase = self.scheduler.alphas_cumprod[timestep]
lowercase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowercase = torch.sqrt(_lowerCamelCase )
lowercase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _lowerCamelCase ):
lowercase = self.scheduler.sigmas[index]
lowercase = latents - sigma * noise_pred
else:
raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase = 1 / 0.1_8_2_1_5 * sample
lowercase = self.vae.decode(_lowerCamelCase ).sample
lowercase = (image / 2 + 0.5).clamp(0 , 1 )
lowercase = transforms.Resize(self.feature_extractor_size )(_lowerCamelCase )
lowercase = self.normalize(_lowerCamelCase ).to(latents.dtype )
lowercase = self.clip_model.get_image_features(_lowerCamelCase )
lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCamelCase )
lowercase = spherical_dist_loss(_lowerCamelCase , _lowerCamelCase ).mean() * clip_guidance_scale
lowercase = -torch.autograd.grad(_lowerCamelCase , _lowerCamelCase )[0]
if isinstance(self.scheduler , _lowerCamelCase ):
lowercase = latents.detach() + grads * (sigma**2)
lowercase = noise_pred_original
else:
lowercase = noise_pred_original - torch.sqrt(_lowerCamelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 0.6 , _lowerCamelCase = 5_0 , _lowerCamelCase = 7.5 , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = 1_0_0 , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = 0.8 , _lowerCamelCase = 0.1 , _lowerCamelCase = 0.1 , ):
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(_lowerCamelCase )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(_lowerCamelCase , torch.Generator ) and batch_size > 1:
lowercase = [generator] + [None] * (batch_size - 1)
lowercase = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
lowercase = [x[0] for x in coca_is_none if x[1]]
lowercase = ', '.join(_lowerCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_lowerCamelCase ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
lowercase = self.get_image_description(_lowerCamelCase )
if style_prompt is None:
if len(_lowerCamelCase ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
lowercase = self.get_image_description(_lowerCamelCase )
# get prompt text embeddings for content and style
lowercase = self.tokenizer(
_lowerCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors='pt' , )
lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowercase = self.tokenizer(
_lowerCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors='pt' , )
lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowercase = slerp(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# duplicate text embeddings for each generation per prompt
lowercase = text_embeddings.repeat_interleave(_lowerCamelCase , dim=0 )
# set timesteps
lowercase = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowercase = {}
if accepts_offset:
lowercase = 1
self.scheduler.set_timesteps(_lowerCamelCase , **_lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowercase , lowercase = self.get_timesteps(_lowerCamelCase , _lowerCamelCase , self.device )
lowercase = timesteps[:1].repeat(_lowerCamelCase )
# Preprocess image
lowercase = preprocess(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase = self.prepare_latents(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , text_embeddings.dtype , self.device , _lowerCamelCase )
lowercase = preprocess(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase = self.prepare_latents(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , text_embeddings.dtype , self.device , _lowerCamelCase )
lowercase = slerp(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if clip_guidance_scale > 0:
lowercase = self.get_clip_image_embeddings(_lowerCamelCase , _lowerCamelCase )
lowercase = self.get_clip_image_embeddings(_lowerCamelCase , _lowerCamelCase )
lowercase = slerp(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase = content_text_input.input_ids.shape[-1]
lowercase = self.tokenizer([''] , padding='max_length' , max_length=_lowerCamelCase , return_tensors='pt' )
lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowercase = uncond_embeddings.repeat_interleave(_lowerCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowercase = torch.randn(_lowerCamelCase , generator=_lowerCamelCase , device='cpu' , dtype=_lowerCamelCase ).to(
self.device )
else:
lowercase = torch.randn(_lowerCamelCase , generator=_lowerCamelCase , device=self.device , dtype=_lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase = {}
if accepts_eta:
lowercase = eta
# check if the scheduler accepts generator
lowercase = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowercase = generator
with self.progress_bar(total=_lowerCamelCase ):
for i, t in enumerate(_lowerCamelCase ):
# expand the latents if we are doing classifier free guidance
lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase = self.scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
# predict the noise residual
lowercase = self.unet(_lowerCamelCase , _lowerCamelCase , encoder_hidden_states=_lowerCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowercase , lowercase = noise_pred.chunk(2 )
lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowercase , lowercase = self.cond_fn(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
lowercase = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase = 1 / 0.1_8_2_1_5 * latents
lowercase = self.vae.decode(_lowerCamelCase ).sample
lowercase = (image / 2 + 0.5).clamp(0 , 1 )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_lowerCamelCase , nsfw_content_detected=_lowerCamelCase )
| 220
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def UpperCamelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase_ ( self ):
lowercase , lowercase = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
lowercase = 'A painting of a squirrel eating a burger'
lowercase = jax.device_count()
lowercase = num_samples * [prompt]
lowercase = sd_pipe.prepare_inputs(_lowerCamelCase )
lowercase = replicate(_lowerCamelCase )
lowercase = shard(_lowerCamelCase )
lowercase = jax.random.PRNGKey(0 )
lowercase = jax.random.split(_lowerCamelCase , jax.device_count() )
lowercase = sd_pipe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_inference_steps=2_5 , jit=_lowerCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self ):
lowercase = 'stabilityai/stable-diffusion-2'
lowercase , lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCamelCase , subfolder='scheduler' )
lowercase , lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCamelCase , scheduler=_lowerCamelCase , revision='bf16' , dtype=jnp.bfloataa , )
lowercase = scheduler_params
lowercase = 'A painting of a squirrel eating a burger'
lowercase = jax.device_count()
lowercase = num_samples * [prompt]
lowercase = sd_pipe.prepare_inputs(_lowerCamelCase )
lowercase = replicate(_lowerCamelCase )
lowercase = shard(_lowerCamelCase )
lowercase = jax.random.PRNGKey(0 )
lowercase = jax.random.split(_lowerCamelCase , jax.device_count() )
lowercase = sd_pipe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_inference_steps=2_5 , jit=_lowerCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 220
| 1
|
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int:
UpperCamelCase__ : Any = 0
UpperCamelCase__ : int = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 196
|
from ..utils import DummyObject, requires_backends
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> int:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Dict:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]:
requires_backends(__lowerCAmelCase , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : int = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
| 196
| 1
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowercase_ = TypeVar("""KEY""")
lowercase_ = TypeVar("""VAL""")
@dataclass(frozen=snake_case__ , slots=snake_case__ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
class a_ ( _Item ):
'''simple docstring'''
def __init__( self ) -> List[str]:
super().__init__(_A , _A )
def __bool__( self ) -> Any:
return False
lowercase_ = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self , A = 8 , A = 0.75 ) -> int:
_SCREAMING_SNAKE_CASE = initial_block_size
_SCREAMING_SNAKE_CASE = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_SCREAMING_SNAKE_CASE = capacity_factor
_SCREAMING_SNAKE_CASE = 0
def snake_case_( self , A ) -> List[Any]:
return hash(_A ) % len(self._buckets )
def snake_case_( self , A ) -> Union[str, Any]:
return (ind + 1) % len(self._buckets )
def snake_case_( self , A , A , A ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self._buckets[ind]
if not stored:
_SCREAMING_SNAKE_CASE = _Item(_A , _A )
self._len += 1
return True
elif stored.key == key:
_SCREAMING_SNAKE_CASE = _Item(_A , _A )
return True
else:
return False
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_A )
def snake_case_( self ) -> Optional[Any]:
if len(self._buckets ) <= self._initial_block_size:
return False
_SCREAMING_SNAKE_CASE = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def snake_case_( self , A ) -> str:
_SCREAMING_SNAKE_CASE = self._buckets
_SCREAMING_SNAKE_CASE = [None] * new_size
_SCREAMING_SNAKE_CASE = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def snake_case_( self ) -> List[str]:
self._resize(len(self._buckets ) * 2 )
def snake_case_( self ) -> str:
self._resize(len(self._buckets ) // 2 )
def snake_case_( self , A ) -> Dict:
_SCREAMING_SNAKE_CASE = self._get_bucket_index(_A )
for _ in range(len(self._buckets ) ):
yield ind
_SCREAMING_SNAKE_CASE = self._get_next_ind(_A )
def snake_case_( self , A , A ) -> int:
for ind in self._iterate_buckets(_A ):
if self._try_set(_A , _A , _A ):
break
def __setitem__( self , A , A ) -> Tuple:
if self._is_full():
self._size_up()
self._add_item(_A , _A )
def __delitem__( self , A ) -> List[str]:
for ind in self._iterate_buckets(_A ):
_SCREAMING_SNAKE_CASE = self._buckets[ind]
if item is None:
raise KeyError(_A )
if item is _deleted:
continue
if item.key == key:
_SCREAMING_SNAKE_CASE = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , A ) -> Union[str, Any]:
for ind in self._iterate_buckets(_A ):
_SCREAMING_SNAKE_CASE = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_A )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Tuple:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = ' ,'.join(
f'{item.key}: {item.val}' for item in self._buckets if item )
return f'HashMap({val_string})'
| 58
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( a , a , a ) -> None:
__A : int = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(a ) == len(a ), F"""{len(a )} != {len(a )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
UpperCAmelCase : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
UpperCAmelCase : Optional[int] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _SCREAMING_SNAKE_CASE ( a , a ) -> Dict:
try:
__A : int = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(a ) )
def _SCREAMING_SNAKE_CASE ( a , a ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__A : List[str] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a , a ):
AutoTokenizer.from_pretrained(a ).save_pretrained(a ) # purely for convenience
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a ).eval()
else:
assert isinstance(a , a ), F"""teacher must be a model or string got type {type(a )}"""
__A : int = teacher.config.to_diff_dict()
try:
__A , __A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__A : str = teacher_e
if d is None:
__A : List[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__A , __A : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__A , __A : Optional[int] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__A : int = teacher_e
if d is None:
__A : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a )
# Copy weights
__A : Dict = teacher.config_class(**a )
__A : int = AutoModelForSeqaSeqLM.from_config(a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__A : Any = student.load_state_dict(teacher.state_dict() , strict=a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__A , __A : Optional[int] = list(range(a ) ), list(range(a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
if d_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
try:
if hasattr(
a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a )
copy_layers(teacher.decoder.block , student.decoder.block , a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__A : Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 280
| 0
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
__A = logging.getLogger(__name__)
__A = 50 # max width of layer names
__A = 70 # max width of quantizer names
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=_lowercase , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=_lowercase , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=_lowercase , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=_lowercase , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=_lowercase , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=_lowercase , type=_lowercase , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=_lowercase , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
if args.calibrator == "max":
__lowerCamelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
__lowerCamelCase = "histogram"
elif args.calibrator == "mse":
__lowerCamelCase = "histogram"
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
__lowerCamelCase = QuantDescriptor(num_bits=args.aprec , calib_method=_lowercase )
__lowerCamelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_lowercase )
quant_nn.QuantLinear.set_default_quant_desc_weight(_lowercase )
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Tuple=False ) -> Optional[int]:
"""simple docstring"""
logger.info('Configuring Model for Quantization' )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_lowercase , ['embeddings'] , which='weight' , _disabled=_lowercase )
if args.quant_disable:
set_quantizer_by_name(_lowercase , [''] , _disabled=_lowercase )
if args.quant_disable_keyword:
set_quantizer_by_name(_lowercase , args.quant_disable_keyword , _disabled=_lowercase )
if args.quant_disable_layer_module:
set_quantizer_by_name(_lowercase , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_lowercase )
if args.quant_enable_layer_module:
set_quantizer_by_name(_lowercase , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_lowercase )
if args.recalibrate_weights:
recalibrate_weights(_lowercase )
if args.fuse_qkv:
fuse_qkv(_lowercase , _lowercase )
if args.clip_gelu:
clip_gelu(_lowercase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_lowercase )
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : int ) -> Dict:
"""simple docstring"""
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_lowercase )
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
def fusea(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
for mod in [qq, qk, qv]:
if not hasattr(_lowercase , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
__lowerCamelCase = qq._amax.detach().item()
__lowerCamelCase = qk._amax.detach().item()
__lowerCamelCase = qv._amax.detach().item()
__lowerCamelCase = max(_lowercase , _lowercase , _lowercase )
qq._amax.fill_(_lowercase )
qk._amax.fill_(_lowercase )
qv._amax.fill_(_lowercase )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
__lowerCamelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_lowercase )
__lowerCamelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Optional[int]:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(_lowercase , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
__lowerCamelCase = mod.weight.shape[0]
__lowerCamelCase = mod._weight_quantizer._amax.detach()
__lowerCamelCase = torch.ones(_lowercase , dtype=amax.dtype , device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(_lowercase , '_weight_quantizer' ):
if not hasattr(mod.weight_quantizer , '_amax' ):
print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
__lowerCamelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
__lowerCamelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
__lowerCamelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_lowercase , keepdims=_lowercase ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
__lowerCamelCase = amax
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=25 , UpperCamelCase__ : List[str]=180 , UpperCamelCase__ : List[str]=None ) -> Optional[int]:
"""simple docstring"""
if ignore is None:
__lowerCamelCase = []
elif not isinstance(_lowercase , _lowercase ):
__lowerCamelCase = [ignore]
__lowerCamelCase = 0
for name, mod in model.named_modules():
if not hasattr(_lowercase , 'weight' ):
continue
__lowerCamelCase = max(_lowercase , len(_lowercase ) )
for name, mod in model.named_modules():
__lowerCamelCase = getattr(_lowercase , '_input_quantizer' , _lowercase )
__lowerCamelCase = getattr(_lowercase , '_weight_quantizer' , _lowercase )
if not hasattr(_lowercase , 'weight' ):
continue
if type(_lowercase ) in ignore:
continue
if [True for s in ignore if type(_lowercase ) is str and s in name]:
continue
__lowerCamelCase = F"""Act:{input_q.extra_repr()}"""
__lowerCamelCase = F"""Wgt:{weight_q.extra_repr()}"""
__lowerCamelCase = F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(_lowercase ) <= line_width:
logger.info(_lowercase )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{' ':{name_width}} {wgt_str}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = 0
for name, mod in model.named_modules():
if isinstance(_lowercase , pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Dict:
"""simple docstring"""
__lowerCamelCase = getattr(_lowercase , _lowercase , _lowercase )
if quantizer_mod is not None:
assert hasattr(_lowercase , _lowercase )
setattr(_lowercase , _lowercase , _lowercase )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple="both" , **UpperCamelCase__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(_lowercase , _lowercase , '_input_quantizer' , _lowercase , _lowercase )
if which in ["weight", "both"]:
set_quantizer(_lowercase , _lowercase , '_weight_quantizer' , _lowercase , _lowercase )
logger.info(_lowercase )
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple ) -> Tuple:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(_lowercase , '_input_quantizer' ) or hasattr(_lowercase , '_weight_quantizer' ):
for n in names:
if re.search(_lowercase , _lowercase ):
set_quantizers(_lowercase , _lowercase , **_lowercase )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(_lowercase , _lowercase ):
__lowerCamelCase = F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(_lowercase , _lowercase , _lowercase )
logger.info(_lowercase )
| 363
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A = logging.get_logger("transformers.models.speecht5")
__A = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A = []
__A = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__A = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Dict:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = []
if task == "s2t":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2T
__lowerCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
__lowerCamelCase = None
__lowerCamelCase = MAPPING_T2S
__lowerCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2S
__lowerCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(UpperCamelCase__ , UpperCamelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__lowerCamelCase , __lowerCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
__lowerCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
__lowerCamelCase = 'weight'
elif "running_mean" in name:
__lowerCamelCase = 'running_mean'
elif "running_var" in name:
__lowerCamelCase = 'running_var'
elif "num_batches_tracked" in name:
__lowerCamelCase = 'num_batches_tracked'
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , ) -> Tuple:
"""simple docstring"""
if config_path is not None:
__lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ )
else:
__lowerCamelCase = SpeechTaConfig()
if task == "s2t":
__lowerCamelCase = config.max_text_positions
__lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ )
elif task == "t2s":
__lowerCamelCase = 1876
__lowerCamelCase = 600
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ )
elif task == "s2s":
__lowerCamelCase = 1876
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
__lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
__lowerCamelCase = SpeechTaFeatureExtractor()
__lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = torch.load(UpperCamelCase__ )
recursively_load_weights(fairseq_checkpoint['model'] , UpperCamelCase__ , UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 348
| 0
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89
|
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
a_ : List[Any] = get_logger(__name__)
class _snake_case ( enum.Enum ):
_lowercase : Any = '''all_checks'''
_lowercase : str = '''basic_checks'''
_lowercase : str = '''no_checks'''
class _snake_case ( A__ ):
pass
class _snake_case ( A__ ):
pass
class _snake_case ( A__ ):
pass
class _snake_case ( A__ ):
pass
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None):
if expected_checksums is None:
logger.info('Unable to verify checksums.')
return
if len(set(_UpperCAmelCase) - set(_UpperCAmelCase)) > 0:
raise ExpectedMoreDownloadedFiles(str(set(_UpperCAmelCase) - set(_UpperCAmelCase)))
if len(set(_UpperCAmelCase) - set(_UpperCAmelCase)) > 0:
raise UnexpectedDownloadedFile(str(set(_UpperCAmelCase) - set(_UpperCAmelCase)))
SCREAMING_SNAKE_CASE = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
SCREAMING_SNAKE_CASE = ' for ' + verification_name if verification_name is not None else ''
if len(_UpperCAmelCase) > 0:
raise NonMatchingChecksumError(
F'''Checksums didn\'t match{for_verification_name}:\n'''
F'''{bad_urls}\n'''
'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error')
logger.info('All the checksums matched successfully' + for_verification_name)
class _snake_case ( A__ ):
pass
class _snake_case ( A__ ):
pass
class _snake_case ( A__ ):
pass
class _snake_case ( A__ ):
pass
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if expected_splits is None:
logger.info('Unable to verify splits sizes.')
return
if len(set(_UpperCAmelCase) - set(_UpperCAmelCase)) > 0:
raise ExpectedMoreSplits(str(set(_UpperCAmelCase) - set(_UpperCAmelCase)))
if len(set(_UpperCAmelCase) - set(_UpperCAmelCase)) > 0:
raise UnexpectedSplits(str(set(_UpperCAmelCase) - set(_UpperCAmelCase)))
SCREAMING_SNAKE_CASE = [
{'expected': expected_splits[name], 'recorded': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(_UpperCAmelCase) > 0:
raise NonMatchingSplitsSizesError(str(_UpperCAmelCase))
logger.info('All the splits matched successfully.')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = True):
if record_checksum:
SCREAMING_SNAKE_CASE = shaaaa()
with open(_UpperCAmelCase , 'rb') as f:
for chunk in iter(lambda: f.read(1 << 20) , B''):
m.update(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = m.hexdigest()
else:
SCREAMING_SNAKE_CASE = None
return {"num_bytes": os.path.getsize(_UpperCAmelCase), "checksum": checksum}
def lowerCamelCase__ (_UpperCAmelCase):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 137
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = ["image_processor", "tokenizer"]
__magic_name__ = "CLIPImageProcessor"
__magic_name__ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case__ , )
_lowerCAmelCase : Tuple = kwargs.pop('feature_extractor' )
_lowerCAmelCase : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase : Dict = self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
_lowerCAmelCase : Union[str, Any] = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
_lowerCAmelCase : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.tokenizer.model_input_names
_lowerCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 25
|
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
UpperCAmelCase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase_ : Optional[Any] = DisjunctiveConstraint(lowercase_ )
self.assertTrue(isinstance(dc.token_ids , lowercase_ ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We can't have constraints that are complete subsets of another. This leads to a preverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCAmelCase_ : Optional[Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(lowercase_ ) # fails here
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase_ : Any = DisjunctiveConstraint(lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = dc.update(1 )
UpperCAmelCase_ : Optional[int] = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(2 )
UpperCAmelCase_ : Any = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dc.update(3 )
UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase_ : str = DisjunctiveConstraint(lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 61
|
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float(moles / volume ) * nfactor )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['LayoutLMv2FeatureExtractor']
SCREAMING_SNAKE_CASE__ = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 183
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 183
| 1
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase = "bart"
UpperCAmelCase = True
@st.cache(allow_output_mutation=lowerCAmelCase__ )
def lowercase ( ) -> Union[str, Any]:
if LOAD_DENSE_INDEX:
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase = qar_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase = sas_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowerCAmelCase__ )
def lowercase ( ) -> Any:
if LOAD_DENSE_INDEX:
_UpperCamelCase = faiss.StandardGpuResources()
_UpperCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
_UpperCamelCase = faiss.IndexFlatIP(128 )
_UpperCamelCase = faiss.index_cpu_to_gpu(lowerCAmelCase__ , 1 , lowerCAmelCase__ )
wikiaab_gpu_index_flat.add(lowerCAmelCase__ ) # TODO fix for larger GPU
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
_UpperCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowerCAmelCase__ )
def lowercase ( ) -> Dict:
_UpperCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
_UpperCamelCase = elia['''train_eli5''']
_UpperCamelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
_UpperCamelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(lowerCAmelCase__ )
return (elia_train, eli5_train_q_index)
UpperCAmelCase = load_indexes()
UpperCAmelCase = load_models()
UpperCAmelCase = load_train_data()
def lowercase ( a__ : List[str] , a__ : Union[str, Any]=10 ) -> Optional[int]:
_UpperCamelCase = embed_questions_for_retrieval([question] , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase , _UpperCamelCase = eli5_train_q_index.search(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = [elia_train[int(lowerCAmelCase__ )] for i in I[0]]
return nn_examples
def lowercase ( a__ : Any , a__ : Dict="wiki40b" , a__ : Optional[Any]="dense" , a__ : Dict=10 ) -> Any:
if source == "none":
_UpperCamelCase , _UpperCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase , _UpperCamelCase = query_qa_dense_index(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
_UpperCamelCase , _UpperCamelCase = query_es_index(
lowerCAmelCase__ , lowerCAmelCase__ , index_name='''english_wiki40b_snippets_100w''' , n_results=lowerCAmelCase__ , )
_UpperCamelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase = '''question: {} context: {}'''.format(lowerCAmelCase__ , lowerCAmelCase__ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda a__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a__ : None),
} )
def lowercase ( a__ : int , a__ : int , a__ : Optional[Any] , a__ : List[str]=64 , a__ : int=256 , a__ : List[Any]=False , a__ : int=2 , a__ : Optional[Any]=0.95 , a__ : List[str]=0.8 ) -> Dict:
with torch.no_grad():
_UpperCamelCase = qa_sas_generate(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , num_answers=1 , num_beams=lowerCAmelCase__ , min_len=lowerCAmelCase__ , max_len=lowerCAmelCase__ , do_sample=lowerCAmelCase__ , temp=lowerCAmelCase__ , top_p=lowerCAmelCase__ , top_k=lowerCAmelCase__ , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
UpperCAmelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCAmelCase = st.sidebar.checkbox("""Demo options""")
if demo_options:
UpperCAmelCase = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
UpperCAmelCase = action_list.index(action_st)
UpperCAmelCase = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
UpperCAmelCase = show_type == "Show full text of passages"
else:
UpperCAmelCase = 3
UpperCAmelCase = True
UpperCAmelCase = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
UpperCAmelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
UpperCAmelCase = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
UpperCAmelCase = "wiki40b"
UpperCAmelCase = "dense"
UpperCAmelCase = "beam"
UpperCAmelCase = 2
UpperCAmelCase = 64
UpperCAmelCase = 256
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = st.sidebar.checkbox("""Generation options""")
if generate_options:
UpperCAmelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
UpperCAmelCase = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase = None
# start main text
UpperCAmelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
UpperCAmelCase = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase = st.text_input("""Enter your question here:""", """""")
else:
UpperCAmelCase = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase = make_support(question, source=wiki_source, method="""dense""", n_results=10)
UpperCAmelCase = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
UpperCAmelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase = support_list[:10]
UpperCAmelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCAmelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
UpperCAmelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(""" """, """_"""))
UpperCAmelCase = res[1].strip()
if sec_titles == "":
UpperCAmelCase = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase = sec_titles.split(""" & """)
UpperCAmelCase = " & ".join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase = find_nearest_training(question)
UpperCAmelCase = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
UpperCAmelCase = [
"{}. {}".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
UpperCAmelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 256
|
'''simple docstring'''
def _A (lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return int(input_a == input_a == 0 )
def _A () -> None:
'''simple docstring'''
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 168
| 0
|
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
def __lowercase ( __lowercase ) -> Optional[int]:
'''simple docstring'''
_A = R"\w+[.]\d+"
_A = re.findall(__lowercase , __lowercase )
for pat in pats:
_A = key.replace(__lowercase , "_".join(pat.split("." ) ) )
return key
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
_A = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
_A = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
_A = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
_A = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
_A = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
_A = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_A = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
_A = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_A = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_A = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowercase ( __lowercase , __lowercase , __lowercase=42 ) -> Tuple:
'''simple docstring'''
_A = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
_A = flax_model.init_weights(PRNGKey(__lowercase ) )
_A = flatten_dict(__lowercase )
_A = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_A = rename_key(__lowercase )
_A = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
_A , _A = rename_key_and_reshape_tensor(__lowercase , __lowercase , __lowercase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
_A = jnp.asarray(__lowercase )
return unflatten_dict(__lowercase )
| 174
|
'''simple docstring'''
lowerCamelCase_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCamelCase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCamelCase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 174
| 1
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
UpperCAmelCase : List[Any] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
for attribute in key.split("." ):
a__ : Optional[Any] =getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__ : Optional[Any] =getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
a__ : Optional[Any] =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
a__ : Optional[int] =value
elif weight_type == "weight_g":
a__ : List[str] =value
elif weight_type == "weight_v":
a__ : Any =value
elif weight_type == "bias":
a__ : Optional[int] =value
else:
a__ : int =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
a__ : int =[]
a__ : Optional[Any] =fairseq_model.state_dict()
a__ : Any =hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
a__ : List[str] =False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == "group" , )
a__ : Any =True
else:
for key, mapped_key in MAPPING.items():
a__ : str ="unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
a__ : Tuple =True
if "*" in mapped_key:
a__ : Union[str, Any] =name.split(SCREAMING_SNAKE_CASE )[0].split("." )[-2]
a__ : Any =mapped_key.replace("*" , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__ : Any ="weight_g"
elif "weight_v" in name:
a__ : Union[str, Any] ="weight_v"
elif "bias" in name:
a__ : Tuple ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ : int ="weight"
else:
a__ : Optional[Any] =None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _A ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
a__ : Optional[Any] =full_name.split("conv_layers." )[-1]
a__ : Dict =name.split("." )
a__ : str =int(items[0] )
a__ : Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
a__ : Optional[Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
a__ : str =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
a__ : int =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
a__ : List[str] =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Any=True ):
"""simple docstring"""
if config_path is not None:
a__ : Optional[int] =UniSpeechSatConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
a__ : Optional[Any] =UniSpeechSatConfig()
a__ : List[Any] =""
if is_finetuned:
a__ : Optional[int] =UniSpeechSatForCTC(SCREAMING_SNAKE_CASE )
else:
a__ : int =UniSpeechSatForPreTraining(SCREAMING_SNAKE_CASE )
a__ , a__ , a__ : str =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
a__ : Optional[Any] =model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 95
|
import numpy as np
def _A ( SCREAMING_SNAKE_CASE : np.array ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95
| 1
|
'''simple docstring'''
import os
lowerCamelCase : str = {"I": 1, "V": 5, "X": 1_0, "L": 5_0, "C": 1_0_0, "D": 5_0_0, "M": 1_0_0_0}
def _lowerCAmelCase ( _UpperCamelCase : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
while index < len(_UpperCamelCase ) - 1:
_SCREAMING_SNAKE_CASE =SYMBOLS[numerals[index]]
_SCREAMING_SNAKE_CASE =SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def _lowerCAmelCase ( _UpperCamelCase : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =''
_SCREAMING_SNAKE_CASE =num // 10_00
numerals += m_count * "M"
num %= 10_00
_SCREAMING_SNAKE_CASE =num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
_SCREAMING_SNAKE_CASE =num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def _lowerCAmelCase ( _UpperCamelCase : str = "/p089_roman.txt" ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
with open(os.path.dirname(_UpperCamelCase ) + roman_numerals_filename ) as filea:
_SCREAMING_SNAKE_CASE =filea.readlines()
for line in lines:
_SCREAMING_SNAKE_CASE =line.strip()
_SCREAMING_SNAKE_CASE =parse_roman_numerals(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =generate_roman_numerals(_UpperCamelCase )
savings += len(_UpperCamelCase ) - len(_UpperCamelCase )
return savings
if __name__ == "__main__":
print(f'''{solution() = }''')
| 114
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase : Optional[int] = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 114
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 327
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def __init__( self : List[str] , _A : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : int = 32 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _A : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _A : bool = True , _A : Tuple=7 , _A : Tuple=30 , _A : int=400 , _A : Tuple=3 , ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = parent
snake_case_ : str = do_resize
snake_case_ : str = size if size is not None else {'shortest_edge': 288}
snake_case_ : Any = size_divisor
snake_case_ : Any = do_rescale
snake_case_ : Union[str, Any] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : int = do_center_crop
snake_case_ : str = image_mean
snake_case_ : int = image_std
snake_case_ : Any = do_pad
snake_case_ : Optional[int] = batch_size
snake_case_ : List[str] = num_channels
snake_case_ : Any = min_resolution
snake_case_ : str = max_resolution
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCAmelCase_ ( self : Dict , _A : str , _A : Union[str, Any]=False ) -> int:
"""simple docstring"""
if not batched:
snake_case_ : Optional[int] = self.size['shortest_edge']
snake_case_ : List[Any] = image_inputs[0]
if isinstance(_A , Image.Image ):
snake_case_ ,snake_case_ : Optional[Any] = image.size
else:
snake_case_ ,snake_case_ : str = image.shape[1], image.shape[2]
snake_case_ : Dict = size / min(_A , _A )
if h < w:
snake_case_ ,snake_case_ : str = size, scale * w
else:
snake_case_ ,snake_case_ : Tuple = scale * h, size
snake_case_ : Dict = int((1333 / 800) * size )
if max(_A , _A ) > max_size:
snake_case_ : Union[str, Any] = max_size / max(_A , _A )
snake_case_ : Any = newh * scale
snake_case_ : Union[str, Any] = neww * scale
snake_case_ ,snake_case_ : Any = int(newh + 0.5 ), int(neww + 0.5 )
snake_case_ ,snake_case_ : int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
snake_case_ : Optional[int] = []
for image in image_inputs:
snake_case_ ,snake_case_ : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : str = max(_A , key=lambda _A : item[0] )[0]
snake_case_ : List[str] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ):
__magic_name__: List[Any] = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
snake_case_ : int = BridgeTowerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self : int ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
self.assertTrue(hasattr(_A , 'size_divisor' ) )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Any = image_processing(_A , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : str = image_processing(_A , return_tensors='pt' ).pixel_values
snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 327
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''hidden_sizes'''))
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_attention_heads'''))
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_encoder_blocks'''))
class _A :
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=13 , __SCREAMING_SNAKE_CASE : str=64 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Optional[int]=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE : List[str]=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE : Any=[16, 32, 64, 128] , __SCREAMING_SNAKE_CASE : List[Any]=[1, 4, 8, 16] , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 2, 4, 8] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : int=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = num_channels
__a = num_encoder_blocks
__a = sr_ratios
__a = depths
__a = hidden_sizes
__a = downsampling_rates
__a = num_attention_heads
__a = is_training
__a = use_labels
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = num_labels
__a = scope
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
__a = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = SegformerModel(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE)
__a = __a = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = self.num_labels
__a = SegformerForSemanticSegmentation(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
__a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a = 1
__a = SegformerForSemanticSegmentation(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(__SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertGreater(result.loss , 0.0)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Any = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ : Optional[int] = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : int = False
UpperCamelCase__ : Tuple = False
UpperCamelCase__ : Optional[int] = False
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = SegformerModelTester(self)
__a = SegformerConfigTester(self , config_class=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__SCREAMING_SNAKE_CASE)
@unittest.skip('''SegFormer does not use inputs_embeds''')
def _lowerCamelCase ( self : int):
'''simple docstring'''
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
__a = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
for model_class in self.all_model_classes:
__a = True
__a = False
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.attentions
__a = sum(self.model_tester.depths)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# verify the first attentions (first block, first layer)
__a = (self.model_tester.image_size // 4) ** 2
__a = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__a = (self.model_tester.image_size // 32) ** 2
__a = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__a = len(__SCREAMING_SNAKE_CASE)
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
self.assertEqual(out_len + 1 , len(__SCREAMING_SNAKE_CASE))
__a = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# verify the first attentions (first block, first layer)
__a = (self.model_tester.image_size // 4) ** 2
__a = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
def check_hidden_states_output(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any]):
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.hidden_states
__a = self.model_tester.num_encoder_blocks
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
if not self.model_tester.is_training:
return
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
for model_class in self.all_model_classes:
if model_class in get_values(__SCREAMING_SNAKE_CASE):
continue
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.train()
__a = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE)
__a = model(**__SCREAMING_SNAKE_CASE).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
@slow
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = SegformerModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def __snake_case ( ):
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class _A ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__SCREAMING_SNAKE_CASE , align=__SCREAMING_SNAKE_CASE , do_random_crop=__SCREAMING_SNAKE_CASE)
__a = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''').to(
__SCREAMING_SNAKE_CASE)
__a = prepare_img()
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
__a = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE)
with torch.no_grad():
__a = model(__SCREAMING_SNAKE_CASE)
__a = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
@slow
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__SCREAMING_SNAKE_CASE , align=__SCREAMING_SNAKE_CASE , do_random_crop=__SCREAMING_SNAKE_CASE)
__a = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''').to(__SCREAMING_SNAKE_CASE)
__a = prepare_img()
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
__a = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE)
with torch.no_grad():
__a = model(__SCREAMING_SNAKE_CASE)
__a = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-1))
@slow
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__SCREAMING_SNAKE_CASE , align=__SCREAMING_SNAKE_CASE , do_random_crop=__SCREAMING_SNAKE_CASE)
__a = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''').to(
__SCREAMING_SNAKE_CASE)
__a = prepare_img()
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
__a = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE)
with torch.no_grad():
__a = model(__SCREAMING_SNAKE_CASE)
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(500, 300)])
__a = torch.Size((500, 300))
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE)
__a = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE)
__a = torch.Size((128, 128))
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE)
| 131
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _A :
UpperCamelCase__ : int
UpperCamelCase__ : TreeNode | None = None
UpperCamelCase__ : TreeNode | None = None
__snake_case :Optional[Any] = namedtuple('''CoinsDistribResult''', '''moves excess''')
def __snake_case ( _UpperCAmelCase ):
if root is None:
return 0
# Validation
def count_nodes(_UpperCAmelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_UpperCAmelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_UpperCAmelCase ) != count_coins(_UpperCAmelCase ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_UpperCAmelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__a , __a = get_distrib(node.left )
__a , __a = get_distrib(node.right )
__a = 1 - left_distrib_excess
__a = 1 - right_distrib_excess
__a = (
left_distrib_moves
+ right_distrib_moves
+ abs(_UpperCAmelCase )
+ abs(_UpperCAmelCase )
)
__a = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_UpperCAmelCase , _UpperCAmelCase )
return get_distrib(_UpperCAmelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 131
| 1
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class _lowercase ( __UpperCAmelCase):
"""simple docstring"""
def __get__( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=None ):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
lowerCamelCase__ : str = "__cached_" + self.fget.__name__
lowerCamelCase__ : str = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if cached is None:
lowerCamelCase__ : int = self.fget(__SCREAMING_SNAKE_CASE )
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return cached
def lowercase_ ( _A : Tuple ):
"""simple docstring"""
lowerCamelCase__ : Tuple = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"invalid truth value {val!r}" )
def lowercase_ ( _A : Optional[Any] ):
"""simple docstring"""
if is_torch_fx_proxy(_UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(_UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_UpperCAmelCase , np.ndarray )
def lowercase_ ( _A : int ):
"""simple docstring"""
return isinstance(_UpperCAmelCase , np.ndarray )
def lowercase_ ( _A : str ):
"""simple docstring"""
return _is_numpy(_UpperCAmelCase )
def lowercase_ ( _A : Tuple ):
"""simple docstring"""
import torch
return isinstance(_UpperCAmelCase , torch.Tensor )
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(_UpperCAmelCase )
def lowercase_ ( _A : Optional[Any] ):
"""simple docstring"""
import torch
return isinstance(_UpperCAmelCase , torch.device )
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(_UpperCAmelCase )
def lowercase_ ( _A : Dict ):
"""simple docstring"""
import torch
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase__ : List[str] = getattr(_UpperCAmelCase , _UpperCAmelCase )
else:
return False
return isinstance(_UpperCAmelCase , torch.dtype )
def lowercase_ ( _A : Dict ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(_UpperCAmelCase )
def lowercase_ ( _A : Tuple ):
"""simple docstring"""
import tensorflow as tf
return isinstance(_UpperCAmelCase , tf.Tensor )
def lowercase_ ( _A : Tuple ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(_UpperCAmelCase )
def lowercase_ ( _A : Tuple ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_UpperCAmelCase , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(_UpperCAmelCase )
return type(_UpperCAmelCase ) == tf.Tensor
def lowercase_ ( _A : List[str] ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(_UpperCAmelCase )
def lowercase_ ( _A : str ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(_UpperCAmelCase , jnp.ndarray )
def lowercase_ ( _A : str ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(_UpperCAmelCase )
def lowercase_ ( _A : Union[str, Any] ):
"""simple docstring"""
if isinstance(_UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(_UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return [to_py_obj(_UpperCAmelCase ) for o in obj]
elif is_tf_tensor(_UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(_UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_UpperCAmelCase ):
return np.asarray(_UpperCAmelCase ).tolist()
elif isinstance(_UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowercase_ ( _A : Optional[Any] ):
"""simple docstring"""
if isinstance(_UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(_UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return np.array(_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(_UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_UpperCAmelCase ):
return np.asarray(_UpperCAmelCase )
else:
return obj
class _lowercase ( __UpperCAmelCase):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Any = fields(self )
# Safety and consistency checks
if not len(__SCREAMING_SNAKE_CASE ):
raise ValueError(f"{self.__class__.__name__} has no fields." )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field." )
lowerCamelCase__ : List[str] = getattr(self , class_fields[0].name )
lowerCamelCase__ : Optional[Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Dict = first_field.items()
lowerCamelCase__ : Union[str, Any] = True
else:
try:
lowerCamelCase__ : Union[str, Any] = iter(__SCREAMING_SNAKE_CASE )
lowerCamelCase__ : Tuple = True
except TypeError:
lowerCamelCase__ : Optional[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__SCREAMING_SNAKE_CASE ):
if (
not isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) )
or not len(__SCREAMING_SNAKE_CASE ) == 2
or not isinstance(element[0] , __SCREAMING_SNAKE_CASE )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCamelCase__ : Dict = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowerCamelCase__ : Dict = element[1]
elif first_field is not None:
lowerCamelCase__ : Union[str, Any] = first_field
else:
for field in class_fields:
lowerCamelCase__ : Optional[Any] = getattr(self , field.name )
if v is not None:
lowerCamelCase__ : Union[str, Any] = v
def __delitem__( self : str , *__lowerCamelCase : Any , **__lowerCamelCase : str ):
'''simple docstring'''
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )
def lowerCAmelCase ( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Tuple ):
'''simple docstring'''
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )
def lowerCAmelCase ( self : Dict , *__lowerCamelCase : int , **__lowerCamelCase : int ):
'''simple docstring'''
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." )
def lowerCAmelCase ( self : str , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." )
def __getitem__( self : Tuple , __lowerCamelCase : Dict ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Optional[Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Any , __lowerCamelCase : str , __lowerCamelCase : int ):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __setitem__( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
class _lowercase ( __UpperCAmelCase , __UpperCAmelCase):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : int , __lowerCamelCase : int ):
'''simple docstring'''
raise ValueError(
f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}" )
class _lowercase ( __UpperCAmelCase):
"""simple docstring"""
A__ = '''longest'''
A__ = '''max_length'''
A__ = '''do_not_pad'''
class _lowercase ( __UpperCAmelCase):
"""simple docstring"""
A__ = '''pt'''
A__ = '''tf'''
A__ = '''np'''
A__ = '''jax'''
class _lowercase :
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : List[ContextManager] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = context_managers
lowerCamelCase__ : str = ExitStack()
def __enter__( self : Union[str, Any] ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(__SCREAMING_SNAKE_CASE )
def __exit__( self : Dict , *__lowerCamelCase : str , **__lowerCamelCase : List[str] ):
'''simple docstring'''
self.stack.__exit__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowercase_ ( _A : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : int = infer_framework(_UpperCAmelCase )
if framework == "tf":
lowerCamelCase__ : str = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase__ : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase__ : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowercase_ ( _A : Any ):
"""simple docstring"""
lowerCamelCase__ : List[Any] = model_class.__name__
lowerCamelCase__ : List[Any] = infer_framework(_UpperCAmelCase )
if framework == "tf":
lowerCamelCase__ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase__ : Any = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase__ : List[Any] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowercase_ ( _A : Dict , _A : Optional[int] = "" , _A : str = "." ):
"""simple docstring"""
def _flatten_dict(_A : int , _A : Tuple="" , _A : List[Any]="." ):
for k, v in d.items():
lowerCamelCase__ : Optional[int] = str(_UpperCAmelCase ) + delimiter + str(_UpperCAmelCase ) if parent_key else k
if v and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
yield from flatten_dict(_UpperCAmelCase , _UpperCAmelCase , delimiter=_UpperCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
@contextmanager
def lowercase_ ( _A : List[Any] , _A : List[Any] = False ):
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowercase_ ( _A : Tuple , _A : Optional[Any]=None ):
"""simple docstring"""
if is_numpy_array(_UpperCAmelCase ):
return np.transpose(_UpperCAmelCase , axes=_UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.T if axes is None else array.permute(*_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(_UpperCAmelCase , perm=_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.transpose(_UpperCAmelCase , axes=_UpperCAmelCase )
else:
raise ValueError(F"Type not supported for transpose: {type(_UpperCAmelCase )}." )
def lowercase_ ( _A : Tuple , _A : int ):
"""simple docstring"""
if is_numpy_array(_UpperCAmelCase ):
return np.reshape(_UpperCAmelCase , _UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.reshape(*_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(_UpperCAmelCase , _UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.reshape(_UpperCAmelCase , _UpperCAmelCase )
else:
raise ValueError(F"Type not supported for reshape: {type(_UpperCAmelCase )}." )
def lowercase_ ( _A : int , _A : str=None ):
"""simple docstring"""
if is_numpy_array(_UpperCAmelCase ):
return np.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase )
else:
raise ValueError(F"Type not supported for squeeze: {type(_UpperCAmelCase )}." )
def lowercase_ ( _A : List[str] , _A : List[str] ):
"""simple docstring"""
if is_numpy_array(_UpperCAmelCase ):
return np.expand_dims(_UpperCAmelCase , _UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.unsqueeze(dim=_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(_UpperCAmelCase , axis=_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.expand_dims(_UpperCAmelCase , axis=_UpperCAmelCase )
else:
raise ValueError(F"Type not supported for expand_dims: {type(_UpperCAmelCase )}." )
def lowercase_ ( _A : Dict ):
"""simple docstring"""
if is_numpy_array(_UpperCAmelCase ):
return np.size(_UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.size(_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return array.size
else:
raise ValueError(F"Type not supported for expand_dims: {type(_UpperCAmelCase )}." )
def lowercase_ ( _A : Tuple , _A : List[Any] ):
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(_UpperCAmelCase , (tuple, list) ):
lowerCamelCase__ : str = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCamelCase__ : Union[str, Any] = F"{repo_id}--{value}"
return auto_map
def lowercase_ ( _A : Tuple ):
"""simple docstring"""
for base_class in inspect.getmro(_UpperCAmelCase ):
lowerCamelCase__ : Any = base_class.__module__
lowerCamelCase__ : Optional[int] = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"Could not infer framework from class {model_class}." )
| 184
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__a = '''fp16'''
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
__a = '''fp16'''
self.assertTrue(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__a = '''fp16'''
self.assertFalse(is_safetensors_compatible(__SCREAMING_SNAKE_CASE , variant=__SCREAMING_SNAKE_CASE))
| 49
| 0
|
snake_case__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def snake_case__ ( lowerCamelCase__ : str ) -> int:
A_ : Tuple = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
A_ : int = Stack()
A_ : Any = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__lowerCAmelCase )
elif i == ")":
# RULE 4
A_ : Tuple = operator_stack.peek()
operator_stack.pop()
A_ : int = operand_stack.peek()
operand_stack.pop()
A_ : Dict = operand_stack.peek()
operand_stack.pop()
A_ : List[str] = operators[opr](__lowerCAmelCase , __lowerCAmelCase )
operand_stack.push(__lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
snake_case__ = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 353
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
| 4
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''vivit'''
def __init__( self , A=224 , A=32 , A=[2, 16, 16] , A=3 , A=768 , A=12 , A=12 , A=3072 , A="gelu_fast" , A=0.0 , A=0.0 , A=0.02 , A=1e-06 , A=True , **A , ) -> int:
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = num_frames
_SCREAMING_SNAKE_CASE = tubelet_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = qkv_bias
super().__init__(**A )
| 58
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class a_ :
'''simple docstring'''
UpperCamelCase = PegasusConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=40 , A=2 , A=1 , A=0 , ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = eos_token_id
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE = prepare_pegasus_inputs_dict(A , A , A )
return config, inputs_dict
def snake_case_( self , A , A ) -> int:
_SCREAMING_SNAKE_CASE = TFPegasusModel(config=A ).get_decoder()
_SCREAMING_SNAKE_CASE = inputs_dict["""input_ids"""]
_SCREAMING_SNAKE_CASE = input_ids[:1, :]
_SCREAMING_SNAKE_CASE = inputs_dict["""attention_mask"""][:1, :]
_SCREAMING_SNAKE_CASE = inputs_dict["""head_mask"""]
_SCREAMING_SNAKE_CASE = 1
# first forward pass
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , head_mask=A , use_cache=A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A )[0]
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , past_key_values=A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A , A , rtol=1e-3 )
def lowerCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : int=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=None , ) ->int:
if attention_mask is None:
_SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCamelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = TFPegasusModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A )
def snake_case_( self ) -> List[str]:
self.config_tester.run_common_tests()
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A )
@require_sentencepiece
@require_tokenizers
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCamelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCamelCase = '''google/pegasus-xsum'''
@cached_property
def snake_case_( self ) -> List[str]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def snake_case_( self , **A ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.translate_src_text(**A )
assert self.expected_text == generated_words
def snake_case_( self , **A ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **A , padding=A , return_tensors="""tf""" )
_SCREAMING_SNAKE_CASE = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A , )
_SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A )
return generated_words
@slow
def snake_case_( self ) -> Any:
self._assert_generated_batch_equal_expected()
| 58
| 1
|
def lowerCAmelCase__ ( _a : int ):
snake_case_ : List[str] = abs(_a )
snake_case_ : List[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def lowerCAmelCase__ ( _a : int ):
snake_case_ : Optional[Any] = abs(_a )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def lowerCAmelCase__ ( _a : int ):
return sum(int(_a ) for c in str(abs(_a ) ) )
def lowerCAmelCase__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_a : Callable , _a : int ) -> None:
snake_case_ : List[str] = F'''{func.__name__}({value})'''
snake_case_ : Dict = timeit(F'''__main__.{call}''' , setup="import __main__" )
print(F'''{call:56} = {func(_a )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_a , _a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 350
|
lowercase : Optional[int] = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 36
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = ["image_processor", "tokenizer"]
_lowerCamelCase = "LayoutLMv3ImageProcessor"
_lowerCamelCase = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCamelCase , )
lowerCamelCase_ = kwargs.pop("feature_extractor" )
lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , **UpperCamelCase , ):
"""simple docstring"""
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
lowerCamelCase_ = self.image_processor(images=UpperCamelCase , return_tensors=UpperCamelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ = features["words"]
lowerCamelCase_ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
# add pixel values
lowerCamelCase_ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowerCamelCase_ = self.get_overflowing_images(UpperCamelCase , encoded_inputs["overflow_to_sample_mapping"] )
lowerCamelCase_ = images
return encoded_inputs
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowerCamelCase_ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCamelCase ) != len(UpperCamelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f''' {len(UpperCamelCase )} and {len(UpperCamelCase )}''' )
return images_with_overflow
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def snake_case ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase , )
return self.image_processor_class
@property
def snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase , )
return self.image_processor
| 55
|
'''simple docstring'''
import math
def __snake_case ( UpperCAmelCase_ : int ):
lowerCamelCase_ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(UpperCAmelCase_ )
def __snake_case ( UpperCAmelCase_ : float = 1 / 12345 ):
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 3
while True:
lowerCamelCase_ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(UpperCAmelCase_ ):
lowerCamelCase_ = int(UpperCAmelCase_ )
total_partitions += 1
if check_partition_perfect(UpperCAmelCase_ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(UpperCAmelCase_ )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
__UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__UpperCamelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__UpperCamelCase = TaTokenizerFast
__UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__UpperCamelCase = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 13
| 1
|
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """EncodecFeatureExtractor"""
_lowerCAmelCase = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , __magic_name__ , __magic_name__ ) -> Optional[Any]:
super().__init__(__magic_name__ , __magic_name__ )
_a = self.feature_extractor
_a = False
def __UpperCAmelCase ( self , __magic_name__=None , __magic_name__=None , __magic_name__=True ) -> List[str]:
return self.tokenizer.get_decoder_prompt_ids(task=__magic_name__ , language=__magic_name__ , no_timestamps=__magic_name__ )
def __call__( self , *__magic_name__ , **__magic_name__ ) -> List[str]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__magic_name__ , **__magic_name__ )
_a = kwargs.pop('audio' , __magic_name__ )
_a = kwargs.pop('sampling_rate' , __magic_name__ )
_a = kwargs.pop('text' , __magic_name__ )
if len(__magic_name__ ) > 0:
_a = args[0]
_a = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
_a = self.tokenizer(__magic_name__ , **__magic_name__ )
if audio is not None:
_a = self.feature_extractor(__magic_name__ , *__magic_name__ , sampling_rate=__magic_name__ , **__magic_name__ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_a = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
_a = audio_inputs['padding_mask']
return inputs
def __UpperCAmelCase ( self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
_a = kwargs.pop('audio' , __magic_name__ )
_a = kwargs.pop('padding_mask' , __magic_name__ )
if len(__magic_name__ ) > 0:
_a = args[0]
_a = args[1:]
if audio_values is not None:
return self._decode_audio(__magic_name__ , padding_mask=__magic_name__ )
else:
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self , *__magic_name__ , **__magic_name__ ) -> str:
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None ) -> List[np.ndarray]:
_a = to_numpy(__magic_name__ )
_a , _a , _a = audio_values.shape
if padding_mask is None:
return list(__magic_name__ )
_a = to_numpy(__magic_name__ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_a = seq_len - padding_mask.shape[-1]
_a = 1 - self.feature_extractor.padding_value
_a = np.pad(__magic_name__ , ((0, 0), (0, difference)) , 'constant' , constant_values=__magic_name__ )
_a = audio_values.tolist()
for i in range(__magic_name__ ):
_a = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_a = sliced_audio.reshape(__magic_name__ , -1 )
return audio_values
| 168
|
'''simple docstring'''
def _A (lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> float:
'''simple docstring'''
_a = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _A () -> Tuple:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168
| 1
|
lowercase = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any ):
'''simple docstring'''
UpperCamelCase__ = [False] * len(lowerCAmelCase__ )
UpperCamelCase__ = [s]
UpperCamelCase__ = True
while queue:
UpperCamelCase__ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowerCAmelCase__ )
UpperCamelCase__ = True
UpperCamelCase__ = u
return visited[t]
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Any, UpperCamelCase__ : str ):
'''simple docstring'''
UpperCamelCase__ = [-1] * (len(lowerCAmelCase__ ))
UpperCamelCase__ = 0
UpperCamelCase__ = []
UpperCamelCase__ = [i[:] for i in graph] # Record original cut, copy.
while bfs(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ ):
UpperCamelCase__ = float('''Inf''' )
UpperCamelCase__ = sink
while s != source:
# Find the minimum value in select path
UpperCamelCase__ = min(lowerCAmelCase__, graph[parent[s]][s] )
UpperCamelCase__ = parent[s]
max_flow += path_flow
UpperCamelCase__ = sink
while v != source:
UpperCamelCase__ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCamelCase__ = parent[v]
for i in range(len(lowerCAmelCase__ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 365
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase = logging.get_logger(__name__)
class __lowercase ( A ):
'''simple docstring'''
_A : Any = '''maskformer'''
_A : Any = {'''hidden_size''': '''mask_feature_size'''}
_A : List[str] = ['''resnet''', '''swin''']
_A : Tuple = ['''detr''']
def __init__( self : Optional[Any] , _a : int = 256 , _a : int = 256 , _a : float = 0.1 , _a : bool = False , _a : Optional[Dict] = None , _a : Optional[Dict] = None , _a : float = 0.02 , _a : float = 1.0 , _a : float = 1.0 , _a : float = 1.0 , _a : float = 20.0 , _a : Optional[bool] = None , **_a : List[str] , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCamelCase__ = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_a , _a ):
UpperCamelCase__ = backbone_config.pop('''model_type''' )
UpperCamelCase__ = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ = config_class.from_dict(_a )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCamelCase__ = DetrConfig()
else:
# verify that the decoder is supported
UpperCamelCase__ = (
decoder_config.pop('''model_type''' ) if isinstance(_a , _a ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"""Transformer Decoder {decoder_type} not supported, please use one of"""
F""" {",".join(self.decoders_supported )}""" )
if isinstance(_a , _a ):
UpperCamelCase__ = CONFIG_MAPPING[decoder_type]
UpperCamelCase__ = config_class.from_dict(_a )
UpperCamelCase__ = backbone_config
UpperCamelCase__ = decoder_config
# main feature dimension for the model
UpperCamelCase__ = fpn_feature_size
UpperCamelCase__ = mask_feature_size
# initializer
UpperCamelCase__ = init_std
UpperCamelCase__ = init_xavier_std
# Hungarian matcher && loss
UpperCamelCase__ = cross_entropy_weight
UpperCamelCase__ = dice_weight
UpperCamelCase__ = mask_weight
UpperCamelCase__ = use_auxiliary_loss
UpperCamelCase__ = no_object_weight
UpperCamelCase__ = output_auxiliary_logits
UpperCamelCase__ = self.decoder_config.encoder_attention_heads
UpperCamelCase__ = self.decoder_config.num_hidden_layers
super().__init__(**_a )
@classmethod
def A_ ( cls : Tuple , _a : PretrainedConfig , _a : PretrainedConfig , **_a : str ):
return cls(
backbone_config=_a , decoder_config=_a , **_a , )
def A_ ( self : str ):
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.backbone_config.to_dict()
UpperCamelCase__ = self.decoder_config.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
| 35
| 0
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase_ = None
try:
import msvcrt
except ImportError:
lowerCAmelCase_ = None
try:
import fcntl
except ImportError:
lowerCAmelCase_ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase_ = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
lowerCAmelCase_ = '3.0.12'
lowerCAmelCase_ = None
def snake_case( ) -> int:
'''simple docstring'''
global _logger
lowercase : Optional[int] = _logger or logging.getLogger(__name__ )
return _logger
class _A ( _lowerCamelCase ):
def __init__( self : str , _A : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase : int = lock_file
return None
def __str__( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase : str = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class _A :
def __init__( self : Union[str, Any] , _A : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = lock
return None
def __enter__( self : Optional[Any] ) -> str:
"""simple docstring"""
return self.lock
def __exit__( self : Union[str, Any] , _A : Any , _A : Optional[Any] , _A : int ) -> Any:
"""simple docstring"""
self.lock.release()
return None
class _A :
def __init__( self : Union[str, Any] , _A : Optional[int] , _A : Optional[int]=-1 , _A : int=None ) -> Optional[int]:
"""simple docstring"""
lowercase : List[str] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lowercase : Dict = self.hash_filename_if_too_long(_A , _A )
# The path to the lock file.
lowercase : Union[str, Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowercase : str = None
# The default timeout value.
lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
lowercase : List[str] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowercase : Union[str, Any] = 0
return None
@property
def __a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return self._lock_file
@property
def __a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def __a ( self : str , _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase : List[str] = float(_A )
return None
def __a ( self : List[str] ) -> Dict:
"""simple docstring"""
raise NotImplementedError()
def __a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
@property
def __a ( self : int ) -> Optional[int]:
"""simple docstring"""
return self._lock_file_fd is not None
def __a ( self : List[Any] , _A : str=None , _A : int=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowercase : Dict = id(self )
lowercase : Dict = self._lock_file
lowercase : int = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(_A )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowercase : Tuple = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __a ( self : Optional[Any] , _A : str=False ) -> Union[str, Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowercase : List[str] = id(self )
lowercase : Union[str, Any] = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
lowercase : Union[str, Any] = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self : str ) -> Tuple:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : Any , _A : str , _A : Any , _A : str ) -> int:
"""simple docstring"""
self.release()
return None
def __del__( self : int ) -> int:
"""simple docstring"""
self.release(force=_A )
return None
def __a ( self : int , _A : str , _A : int ) -> str:
"""simple docstring"""
lowercase : Union[str, Any] = os.path.basename(_A )
if len(_A ) > max_length and max_length > 0:
lowercase : str = os.path.dirname(_A )
lowercase : List[Any] = str(hash(_A ) )
lowercase : str = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(_A , _A )
else:
return path
class _A ( _lowerCamelCase ):
def __init__( self : Dict , _A : List[Any] , _A : int=-1 , _A : Optional[Any]=None ) -> Any:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(_A , timeout=_A , max_filename_length=_A )
lowercase : Optional[Any] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowercase : Union[str, Any] = os.open(self._lock_file , _A )
except OSError:
pass
else:
try:
msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(_A )
else:
lowercase : str = fd
return None
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[int] = self._lock_file_fd
lowercase : Optional[int] = None
msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 )
os.close(_A )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class _A ( _lowerCamelCase ):
def __init__( self : Tuple , _A : Dict , _A : List[Any]=-1 , _A : Optional[int]=None ) -> str:
"""simple docstring"""
lowercase : List[Any] = os.statvfs(os.path.dirname(_A ) ).f_namemax
super().__init__(_A , timeout=_A , max_filename_length=_A )
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase : Dict = os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowercase : Dict = os.open(self._lock_file , _A )
try:
fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(_A )
else:
lowercase : int = fd
return None
def __a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = self._lock_file_fd
lowercase : Dict = None
fcntl.flock(_A , fcntl.LOCK_UN )
os.close(_A )
return None
class _A ( _lowerCamelCase ):
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowercase : int = os.open(self._lock_file , _A )
except OSError:
pass
else:
lowercase : Any = fd
return None
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
os.close(self._lock_file_fd )
lowercase : List[Any] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase_ = None
if msvcrt:
lowerCAmelCase_ = WindowsFileLock
elif fcntl:
lowerCAmelCase_ = UnixFileLock
else:
lowerCAmelCase_ = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
| 308
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
lowercase : List[Any] = model(_A , labels=_A ).loss
lowercase : Dict = -tf.math.reduce_mean(_A ).numpy()
lowercase : Union[str, Any] = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 308
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = KandinskyVaaImgaImgPipeline
lowerCamelCase__ = ["""image_embeds""", """negative_image_embeds""", """image"""]
lowerCamelCase__ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
lowerCamelCase__ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowerCamelCase__ = False
@property
def A_ ( self ):
return 32
@property
def A_ ( self ):
return 32
@property
def A_ ( self ):
return self.time_input_dim
@property
def A_ ( self ):
return self.time_input_dim * 4
@property
def A_ ( self ):
return 100
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : int = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**lowercase )
return model
@property
def A_ ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs )
return model
def A_ ( self ):
_lowerCamelCase : str = self.dummy_unet
_lowerCamelCase : Union[str, Any] = self.dummy_movq
_lowerCamelCase : str = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.0_00_85,
'beta_end': 0.0_12,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_lowerCamelCase : Union[str, Any] = DDIMScheduler(**lowercase )
_lowerCamelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A_ ( self , lowercase , lowercase=0 ):
_lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase ) ).to(lowercase )
_lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase )
# create init_image
_lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
_lowerCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase : Any = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ).resize((256, 256) )
if str(lowercase ).startswith('mps' ):
_lowerCamelCase : int = torch.manual_seed(lowercase )
else:
_lowerCamelCase : int = torch.Generator(device=lowercase ).manual_seed(lowercase )
_lowerCamelCase : List[str] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def A_ ( self ):
_lowerCamelCase : List[Any] = 'cpu'
_lowerCamelCase : str = self.get_dummy_components()
_lowerCamelCase : List[Any] = self.pipeline_class(**lowercase )
_lowerCamelCase : Dict = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : Any = pipe(**self.get_dummy_inputs(lowercase ) )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : Dict = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
_lowerCamelCase : int = image[0, -3:, -3:, -1]
_lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCamelCase : Union[str, Any] = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
_lowerCamelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCamelCase : List[str] = 'A red cartoon frog, 4k'
_lowerCamelCase : str = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase )
_lowerCamelCase : List[str] = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
_lowerCamelCase : Dict = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCamelCase, _lowerCamelCase : List[str] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCamelCase : Optional[int] = pipeline(
image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
_lowerCamelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase , lowercase )
| 12
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : Optional[Any] = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 95
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> Optional[Any]:
'''simple docstring'''
a__ : Union[str, Any] =size if size is not None else {"shortest_edge": 2_0}
a__ : List[str] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
a__ : Tuple =parent
a__ : Union[str, Any] =batch_size
a__ : List[str] =num_channels
a__ : List[Any] =image_size
a__ : str =min_resolution
a__ : Optional[int] =max_resolution
a__ : Tuple =do_resize
a__ : Union[str, Any] =size
a__ : List[Any] =do_center_crop
a__ : List[str] =crop_size
a__ : Optional[int] =do_flip_channel_order
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : int = MobileViTImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple =MobileViTImageProcessingTester(self )
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_flip_channel_order" ) )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
a__ : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : List[Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : int =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : int =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : List[str] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 95
| 1
|
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> bool:
snake_case : str = [int(UpperCAmelCase_ ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
return len(UpperCAmelCase_ ) == 4 and all(0 <= int(UpperCAmelCase_ ) <= 254 for octet in octets )
if __name__ == "__main__":
lowerCamelCase : str = input().strip()
lowerCamelCase : Dict = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 357
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
return ConvertCommand(
args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
lowerCamelCase : Tuple = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( A ) -> List[str]:
snake_case : Union[str, Any] = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=A , required=A , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=A , required=A , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=A , required=A , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=A , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=A , default=A , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=A )
def __init__( self , A , A , A , A , A , *A , ) -> List[Any]:
snake_case : Any = logging.get_logger("""transformers-cli/converting""" )
self._logger.info(f"""Loading model {model_type}""" )
snake_case : int = model_type
snake_case : Any = tf_checkpoint
snake_case : int = pytorch_dump_output
snake_case : List[str] = config
snake_case : Tuple = finetuning_task_name
def UpperCAmelCase ( self ) -> int:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
if "ckpt" in self._tf_checkpoint.lower():
snake_case : List[Any] = self._tf_checkpoint
snake_case : Tuple = """"""
else:
snake_case : Tuple = self._tf_checkpoint
snake_case : Tuple = """"""
convert_transfo_xl_checkpoint_to_pytorch(
A , self._config , self._pytorch_dump_output , A )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 176
| 0
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase__ = 16
lowerCAmelCase__ = 32
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCamelCase = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
snake_case__ , padding="longest" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets["train"] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
UpperCamelCase = DataLoader(
tokenized_datasets["validation"] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase__ = mocked_dataloaders # noqa: F811
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , snake_case__ ) == "1":
UpperCamelCase = 2
# Initialize accelerator
UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config["lr"]
UpperCamelCase = int(config["num_epochs"] )
UpperCamelCase = int(config["seed"] )
UpperCamelCase = int(config["batch_size"] )
UpperCamelCase = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=snake_case__ )
def inner_training_loop(_SCREAMING_SNAKE_CASE ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=snake_case__ )
UpperCamelCase , UpperCamelCase = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCamelCase = model(**snake_case__ )
UpperCamelCase = outputs.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase = model(**snake_case__ )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
UpperCamelCase , UpperCamelCase = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , snake_case__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a__ ( ):
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=snake_case__ , default=snake_case__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCamelCase = parser.parse_args()
UpperCamelCase = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 153
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Tuple = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 355
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( _UpperCAmelCase , unittest.TestCase ):
lowercase : Any = AudioLDMPipeline
lowercase : Union[str, Any] = TEXT_TO_AUDIO_PARAMS
lowercase : List[str] = TEXT_TO_AUDIO_BATCH_PARAMS
lowercase : Tuple = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
def a_ ( self ):
torch.manual_seed(0 )
UpperCamelCase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Optional[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase : int = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
UpperCamelCase : Optional[int] = ClapTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
UpperCamelCase : Tuple = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Tuple = SpeechTaHifiGan(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Any = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def a_ ( self ):
UpperCamelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Any = self.get_dummy_components()
UpperCamelCase : int = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 256
UpperCamelCase : Tuple = audio[:10]
UpperCamelCase : Dict = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : str = self.get_dummy_components()
UpperCamelCase : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = 3 * [inputs["""prompt"""]]
# forward
UpperCamelCase : List[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = output.audios[0]
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase : List[str] = audioldm_pipe.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase : Optional[int] = text_inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audioldm_pipe.text_encoder(
SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase : Optional[int] = F.normalize(SCREAMING_SNAKE_CASE_ , dim=-1 )
UpperCamelCase : Tuple = prompt_embeds
# forward
UpperCamelCase : List[str] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : List[str] = self.get_dummy_components()
UpperCamelCase : List[Any] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = 3 * ["""this is a negative prompt"""]
UpperCamelCase : List[Any] = negative_prompt
UpperCamelCase : str = 3 * [inputs["""prompt"""]]
# forward
UpperCamelCase : str = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
UpperCamelCase : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCamelCase : int = audioldm_pipe.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase : Union[str, Any] = text_inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audioldm_pipe.text_encoder(
SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase : Optional[int] = F.normalize(SCREAMING_SNAKE_CASE_ , dim=-1 )
embeds.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase : Tuple = embeds
# forward
UpperCamelCase : List[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Optional[int] = self.get_dummy_components()
UpperCamelCase : List[str] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = """egg cracking"""
UpperCamelCase : List[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 256
UpperCamelCase : Union[str, Any] = audio[:10]
UpperCamelCase : Dict = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Union[str, Any] = self.get_dummy_components()
UpperCamelCase : Tuple = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
UpperCamelCase : List[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCamelCase : Dict = 2
UpperCamelCase : List[str] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCamelCase : List[str] = 2
UpperCamelCase : Optional[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCamelCase : Any = 2
UpperCamelCase : str = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def a_ ( self ):
UpperCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = audioldm_pipe.vocoder.config.sampling_rate
UpperCamelCase : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = audioldm_pipe(audio_length_in_s=0.016 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) / vocoder_sampling_rate == 0.016
UpperCamelCase : Optional[Any] = audioldm_pipe(audio_length_in_s=0.032 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) / vocoder_sampling_rate == 0.032
def a_ ( self ):
UpperCamelCase : str = self.get_dummy_components()
UpperCamelCase : Optional[Any] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = ["""hey"""]
UpperCamelCase : Dict = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=1 )
UpperCamelCase : str = output.audios.shape
assert audio_shape == (1, 256)
UpperCamelCase : Optional[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCamelCase : str = SpeechTaHifiGan(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=1 )
UpperCamelCase : List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def a_ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
@slow
class lowerCamelCase ( unittest.TestCase ):
def a_ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="cpu" , SCREAMING_SNAKE_CASE_=torch.floataa , SCREAMING_SNAKE_CASE_=0 ):
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = np.random.RandomState(SCREAMING_SNAKE_CASE_ ).standard_normal((1, 8, 128, 16) )
UpperCamelCase : int = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def a_ ( self ):
UpperCamelCase : Optional[int] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
UpperCamelCase : List[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.get_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = 25
UpperCamelCase : Optional[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ ).audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 8_1920
UpperCamelCase : Union[str, Any] = audio[7_7230:7_7240]
UpperCamelCase : Optional[Any] = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCamelCase : Any = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def a_ ( self ):
UpperCamelCase : Any = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
UpperCamelCase : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCamelCase : str = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.get_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ ).audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 8_1920
UpperCamelCase : Union[str, Any] = audio[2_7780:2_7790]
UpperCamelCase : Tuple = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCamelCase : Tuple = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 27
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='yolos'
def __init__(self , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.0 , a_=0.0 , a_=0.02 , a_=1E-12 , a_=[5_12, 8_64] , a_=16 , a_=3 , a_=True , a_=1_00 , a_=True , a_=False , a_=1 , a_=5 , a_=2 , a_=5 , a_=2 , a_=0.1 , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Union[str, Any] = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = initializer_range
__snake_case : Tuple = layer_norm_eps
__snake_case : List[Any] = image_size
__snake_case : Tuple = patch_size
__snake_case : str = num_channels
__snake_case : Tuple = qkv_bias
__snake_case : Union[str, Any] = num_detection_tokens
__snake_case : List[str] = use_mid_position_embeddings
__snake_case : Tuple = auxiliary_loss
# Hungarian matcher
__snake_case : List[str] = class_cost
__snake_case : int = bbox_cost
__snake_case : int = giou_cost
# Loss coefficients
__snake_case : Optional[int] = bbox_loss_coefficient
__snake_case : List[str] = giou_loss_coefficient
__snake_case : List[Any] = eos_coefficient
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 1E-4
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 12
| 102
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''rwkv'''
__magic_name__ = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : str , lowerCAmelCase_ : str=50_277 , lowerCAmelCase_ : Optional[int]=1_024 , lowerCAmelCase_ : Optional[int]=4_096 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : List[Any] , ) -> List[str]:
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : List[str] = context_length
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size
UpperCAmelCase_ : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size
UpperCAmelCase_ : Any = layer_norm_epsilon
UpperCAmelCase_ : List[Any] = rescale_every
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : List[str] = bos_token_id
UpperCAmelCase_ : Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
| 268
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
lowercase__ : str = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : List[str] = num_channels
lowercase__ : Dict = image_size
lowercase__ : int = min_resolution
lowercase__ : int = max_resolution
lowercase__ : str = do_resize
lowercase__ : List[Any] = size if size is not None else {'''height''': 18, '''width''': 20}
lowercase__ : Tuple = do_thumbnail
lowercase__ : Dict = do_align_axis
lowercase__ : str = do_pad
lowercase__ : str = do_normalize
lowercase__ : Any = image_mean
lowercase__ : str = image_std
def _lowerCAmelCase( self ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = DonutImageProcessor if is_vision_available() else None
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : List[Any] = DonutImageProcessingTester(self )
@property
def _lowerCAmelCase( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
def _lowerCAmelCase( self ) -> Any:
lowercase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
lowercase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCAmelCase( self ) -> Optional[int]:
pass
@is_flaky()
def _lowerCAmelCase( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
lowercase__ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCAmelCase( self ) -> Union[str, Any]:
# Initialize image_processing
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase__ : Dict = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCAmelCase( self ) -> str:
# Initialize image_processing
lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase__ : int = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 214
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__a: Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def __init__( self , **__lowerCAmelCase ) -> int:
super().__init__(**__lowerCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> Optional[Any]:
lowercase__ : str = {}
if "candidate_labels" in kwargs:
lowercase__ : str = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowercase__ : str = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase="This is a photo of {}." ) -> Any:
lowercase__ : Union[str, Any] = load_image(__lowerCAmelCase )
lowercase__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
lowercase__ : Union[str, Any] = candidate_labels
lowercase__ : int = [hypothesis_template.format(__lowerCAmelCase ) for x in candidate_labels]
lowercase__ : Any = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework , padding=__lowerCAmelCase )
lowercase__ : Any = [text_inputs]
return inputs
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[Any]:
lowercase__ : Any = model_inputs.pop('''candidate_labels''' )
lowercase__ : int = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __lowerCAmelCase ):
lowercase__ : Union[str, Any] = text_inputs[0]
else:
# Batching case.
lowercase__ : Optional[Any] = text_inputs[0][0]
lowercase__ : Any = self.model(**__lowerCAmelCase , **__lowerCAmelCase )
lowercase__ : Any = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def _lowerCAmelCase( self , __lowerCAmelCase ) -> List[str]:
lowercase__ : Union[str, Any] = model_outputs.pop('''candidate_labels''' )
lowercase__ : Optional[int] = model_outputs['''logits'''][0]
if self.framework == "pt":
lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__ : Any = probs.tolist()
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : Dict = [scores]
elif self.framework == "tf":
lowercase__ : List[Any] = stable_softmax(__lowerCAmelCase , axis=-1 )
lowercase__ : str = probs.numpy().tolist()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
lowercase__ : Optional[int] = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__lowerCAmelCase , __lowerCAmelCase ) , key=lambda __lowerCAmelCase : -x[0] )
]
return result
| 214
| 1
|
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
_a : Optional[int] = str(bin(lowerCAmelCase_ ) )
binary_number += "0" * shift_amount
return binary_number
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
_a : Tuple = str(bin(lowerCAmelCase_ ) )[2:]
if shift_amount >= len(lowerCAmelCase_ ):
return "0b0"
_a : Optional[int] = binary_number[: len(lowerCAmelCase_ ) - shift_amount]
return "0b" + shifted_binary_number
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
if number >= 0: # Get binary representation of positive number
_a : Optional[Any] = '0' + str(bin(lowerCAmelCase_ ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
_a : List[str] = len(bin(lowerCAmelCase_ )[3:] ) # Find 2's complement of number
_a : Optional[int] = bin(abs(lowerCAmelCase_ ) - (1 << binary_number_length) )[3:]
_a : Dict = (
'1' + '0' * (binary_number_length - len(lowerCAmelCase_ )) + binary_number
)
if shift_amount >= len(lowerCAmelCase_ ):
return "0b" + binary_number[0] * len(lowerCAmelCase_ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(lowerCAmelCase_ ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89
| 1
|
"""simple docstring"""
from math import pi, sqrt
def _UpperCAmelCase ( __lowerCamelCase : float ):
"""simple docstring"""
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
elif num - int(__lowerCamelCase ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(__lowerCamelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _UpperCAmelCase ( ):
"""simple docstring"""
assert gamma(0.5 ) == sqrt(__lowerCamelCase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase : Union[str, Any] = 1.0
while num:
_lowercase : Optional[int] = float(input("Gamma of: "))
print(f'gamma({num}) = {gamma(num)}')
print("\nEnter 0 to exit...")
| 368
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def snake_case__ ( __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int , __lowerCamelCase : float = 1 , __lowerCamelCase : float = 1 , __lowerCamelCase : float = 1.0e4 , __lowerCamelCase : bool = False , __lowerCamelCase : float = 1.0 , ):
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
lowerCamelCase__ : Any =float(embedding_dim // 2 )
lowerCamelCase__ : List[str] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowerCamelCase__ : int =min_timescale * jnp.exp(jnp.arange(__lowerCamelCase , dtype=jnp.floataa ) * -log_timescale_increment )
lowerCamelCase__ : Tuple =jnp.expand_dims(__lowerCamelCase , 1 ) * jnp.expand_dims(__lowerCamelCase , 0 )
# scale embeddings
lowerCamelCase__ : List[str] =scale * emb
if flip_sin_to_cos:
lowerCamelCase__ : int =jnp.concatenate([jnp.cos(__lowerCamelCase ), jnp.sin(__lowerCamelCase )] , axis=1 )
else:
lowerCamelCase__ : List[str] =jnp.concatenate([jnp.sin(__lowerCamelCase ), jnp.cos(__lowerCamelCase )] , axis=1 )
lowerCamelCase__ : str =jnp.reshape(__lowerCamelCase , [jnp.shape(__lowerCamelCase )[0], embedding_dim] )
return signal
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = jnp.floataa
@nn.compact
def __call__( self : Optional[Any], lowerCamelCase : int )-> Any:
lowerCamelCase__ : Optional[Any] =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_1''' )(lowerCamelCase )
lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
lowerCamelCase__ : Any =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_2''' )(lowerCamelCase )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = False
_a = 1
@nn.compact
def __call__( self : Any, lowerCamelCase : int )-> int:
return get_sinusoidal_embeddings(
lowerCamelCase, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift )
| 272
| 0
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
a__ = """\
Text data.
Second line of data."""
a__ = """file"""
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __a : Dict ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_a : Optional[Any] = bytes(__a ,'''utf-8''' )
with zstd.open(__a ,'''wb''' ) as f:
f.write(__a )
return path
@pytest.fixture
def __UpperCAmelCase ( __a : Dict ) -> Union[str, Any]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir ,__a ) ,'''w''' ) as f:
f.write(__a )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' ,['''gzip''', '''xz''', '''zstd'''] )
def __UpperCAmelCase ( __a : Tuple ,__a : Any ,__a : Dict ,__a : Optional[int] ,__a : Any ,__a : Tuple ) -> int:
"""simple docstring"""
_a : List[str] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_a : str = input_paths[compression_format]
_a : Tuple = tmp_path / '''cache'''
_a : Optional[Any] = DownloadConfig(cache_dir=__a ,extract_compressed_file=__a )
_a : Union[str, Any] = cached_path(__a ,download_config=__a )
with open(__a ) as f:
_a : Tuple = f.read()
with open(__a ) as f:
_a : str = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' ,[True, False] )
@pytest.mark.parametrize('''default_cache_dir''' ,[True, False] )
def __UpperCAmelCase ( __a : List[str] ,__a : Any ,__a : Optional[Any] ,__a : str ,__a : int ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = '''custom_cache'''
_a : Any = '''custom_extracted_dir'''
_a : Dict = tmp_path / '''custom_extracted_path'''
if default_extracted:
_a : Tuple = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' ,__a )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' ,str(__a ) )
_a : Tuple = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a : List[str] = xz_file
_a : str = (
DownloadConfig(extract_compressed_file=__a )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=__a )
)
_a : Any = cached_path(__a ,download_config=__a )
assert Path(__a ).parent.parts[-2:] == expected
def __UpperCAmelCase ( __a : Optional[Any] ) -> str:
"""simple docstring"""
_a : List[Any] = str(Path(__a ).resolve() )
assert cached_path(__a ) == text_file
# relative path
_a : List[Any] = str(Path(__a ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__a ) == text_file
def __UpperCAmelCase ( __a : Any ) -> Tuple:
"""simple docstring"""
_a : int = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(__a ):
cached_path(__a )
# relative path
_a : Any = '''./__missing_file__.txt'''
with pytest.raises(__a ):
cached_path(__a )
def __UpperCAmelCase ( __a : List[str] ) -> str:
"""simple docstring"""
_a : List[str] = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(__a ) as f:
_a : Optional[Any] = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,__a )
def __UpperCAmelCase ( ) -> Any:
"""simple docstring"""
with pytest.raises(__a ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,__a )
def __UpperCAmelCase ( __a : int ) -> str:
"""simple docstring"""
_a : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__a ):
http_get('''https://huggingface.co''' ,temp_file=__a )
with pytest.raises(__a ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,__a )
def __UpperCAmelCase ( __a : List[str] ) -> Tuple:
"""simple docstring"""
_a : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__a ):
ftp_get('''ftp://huggingface.co''' ,temp_file=__a )
with pytest.raises(__a ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,__a )
def __UpperCAmelCase ( __a : Any ) -> List[str]:
"""simple docstring"""
_a : int = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__a ):
fsspec_get('''s3://huggingface.co''' ,temp_file=__a )
with pytest.raises(__a ):
fsspec_head('''s3://huggingface.co''' )
| 235
|
"""simple docstring"""
def _A ( lowercase , lowercase ):
"""simple docstring"""
while second != 0:
a =first & second
first ^= second
a =c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : Dict = int(input("""Enter the first number: """).strip())
lowerCamelCase_ : List[Any] = int(input("""Enter the second number: """).strip())
print(F'{add(first, second) = }')
| 81
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Dict:
snake_case_ = tempfile.mkdtemp()
# fmt: off
snake_case_ = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case_ = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
snake_case_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
snake_case_ = {"""unk_token""": """<unk>"""}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase ) )
snake_case_ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
snake_case_ = os.path.join(self.tmpdirname , lowerCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( self , **lowerCamelCase ) -> Optional[Any]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **lowerCamelCase )
def lowerCAmelCase_ ( self , **lowerCamelCase ) -> Any:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **lowerCamelCase )
def lowerCAmelCase_ ( self , **lowerCamelCase ) -> Optional[Any]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
snake_case_ = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase_ ( self ) -> int:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = OwlViTProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
snake_case_ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
snake_case_ = OwlViTProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
snake_case_ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Any:
snake_case_ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case_ = self.get_image_processor(do_normalize=lowerCamelCase )
snake_case_ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowerCamelCase , return_tensors="""np""" )
snake_case_ = processor(images=lowerCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase_ ( self ) -> str:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case_ = """lower newer"""
snake_case_ = processor(text=lowerCamelCase , return_tensors="""np""" )
snake_case_ = tokenizer(lowerCamelCase , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case_ = """lower newer"""
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case_ = """google/owlvit-base-patch32"""
snake_case_ = OwlViTProcessor.from_pretrained(lowerCamelCase )
snake_case_ = ["""cat""", """nasa badge"""]
snake_case_ = processor(text=lowerCamelCase )
snake_case_ = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowerCAmelCase_ ( self ) -> str:
snake_case_ = """google/owlvit-base-patch32"""
snake_case_ = OwlViTProcessor.from_pretrained(lowerCamelCase )
snake_case_ = [["""cat""", """nasa badge"""], ["""person"""]]
snake_case_ = processor(text=lowerCamelCase )
snake_case_ = 16
snake_case_ = len(lowerCamelCase )
snake_case_ = max([len(lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowerCAmelCase_ ( self ) -> Any:
snake_case_ = """google/owlvit-base-patch32"""
snake_case_ = OwlViTProcessor.from_pretrained(lowerCamelCase )
snake_case_ = ["""cat""", """nasa badge"""]
snake_case_ = processor(text=lowerCamelCase )
snake_case_ = 16
snake_case_ = inputs["""input_ids"""]
snake_case_ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowerCAmelCase_ ( self ) -> str:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case_ = self.prepare_image_inputs()
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(images=lowerCamelCase , query_images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowerCAmelCase_ ( self ) -> Any:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = OwlViTProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(lowerCamelCase )
snake_case_ = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
| 34
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowerCamelCase_ = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCamelCase( lowercase_ ) -> Tuple:
'''simple docstring'''
snake_case_ = EfficientNetConfig()
snake_case_ = CONFIG_MAP[model_name]["""hidden_dim"""]
snake_case_ = CONFIG_MAP[model_name]["""width_coef"""]
snake_case_ = CONFIG_MAP[model_name]["""depth_coef"""]
snake_case_ = CONFIG_MAP[model_name]["""image_size"""]
snake_case_ = CONFIG_MAP[model_name]["""dropout_rate"""]
snake_case_ = CONFIG_MAP[model_name]["""dw_padding"""]
snake_case_ = """huggingface/label-files"""
snake_case_ = """imagenet-1k-id2label.json"""
snake_case_ = 1000
snake_case_ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ = {int(lowercase_ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase( ) -> Tuple:
'''simple docstring'''
snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
def UpperCamelCase( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = CONFIG_MAP[model_name]["""image_size"""]
snake_case_ = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=lowercase_ , )
return preprocessor
def UpperCamelCase( lowercase_ ) -> str:
'''simple docstring'''
snake_case_ = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
snake_case_ = sorted(set(lowercase_ ) )
snake_case_ = len(lowercase_ )
snake_case_ = {b: str(lowercase_ ) for b, i in zip(lowercase_ , range(lowercase_ ) )}
snake_case_ = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
snake_case_ = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
snake_case_ = {}
for item in rename_keys:
if item[0] in original_param_names:
snake_case_ = """efficientnet.""" + item[1]
snake_case_ = """classifier.weight"""
snake_case_ = """classifier.bias"""
return key_mapping
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
snake_case_ = key_mapping[key]
if "_conv" in key and "kernel" in key:
snake_case_ = torch.from_numpy(lowercase_ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
snake_case_ = torch.from_numpy(lowercase_ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
snake_case_ = torch.from_numpy(np.transpose(lowercase_ ) )
else:
snake_case_ = torch.from_numpy(lowercase_ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase_ )
@torch.no_grad()
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
snake_case_ = model_classes[model_name](
include_top=lowercase_ , weights="""imagenet""" , input_tensor=lowercase_ , input_shape=lowercase_ , pooling=lowercase_ , classes=1000 , classifier_activation="""softmax""" , )
snake_case_ = original_model.trainable_variables
snake_case_ = original_model.non_trainable_variables
snake_case_ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
snake_case_ = param.numpy()
snake_case_ = list(tf_params.keys() )
# Load HuggingFace model
snake_case_ = get_efficientnet_config(lowercase_ )
snake_case_ = EfficientNetForImageClassification(lowercase_ ).eval()
snake_case_ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
snake_case_ = rename_keys(lowercase_ )
replace_params(lowercase_ , lowercase_ , lowercase_ )
# Initialize preprocessor and preprocess input image
snake_case_ = convert_image_processor(lowercase_ )
snake_case_ = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
snake_case_ = hf_model(**lowercase_ )
snake_case_ = outputs.logits.detach().numpy()
# Original model inference
snake_case_ = False
snake_case_ = CONFIG_MAP[model_name]["""image_size"""]
snake_case_ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
snake_case_ = image.img_to_array(lowercase_ )
snake_case_ = np.expand_dims(lowercase_ , axis=0 )
snake_case_ = original_model.predict(lowercase_ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase_ , lowercase_ , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase_ ):
os.mkdir(lowercase_ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase_ )
preprocessor.save_pretrained(lowercase_ )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
snake_case_ = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase_ )
hf_model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCamelCase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 34
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """mctct"""
def __init__( self , __UpperCAmelCase=8065 , __UpperCAmelCase=1536 , __UpperCAmelCase=36 , __UpperCAmelCase=6144 , __UpperCAmelCase=4 , __UpperCAmelCase=384 , __UpperCAmelCase=920 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.3 , __UpperCAmelCase="relu" , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.3 , __UpperCAmelCase=0.3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0.3 , __UpperCAmelCase=1 , __UpperCAmelCase=(7,) , __UpperCAmelCase=(3,) , __UpperCAmelCase=80 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="sum" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = num_attention_heads
__lowerCamelCase = attention_head_dim
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = layerdrop
__lowerCamelCase = hidden_act
__lowerCamelCase = initializer_range
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = eos_token_id
__lowerCamelCase = conv_glu_dim
__lowerCamelCase = conv_dropout
__lowerCamelCase = num_conv_layers
__lowerCamelCase = input_feat_per_channel
__lowerCamelCase = input_channels
__lowerCamelCase = conv_channels
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# prevents config testing fail with exporting to json
__lowerCamelCase = list(__UpperCAmelCase )
__lowerCamelCase = list(__UpperCAmelCase )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 330
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_config()
__lowerCamelCase = 300
return config
def lowerCamelCase ( self ):
'''simple docstring'''
(
(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,
) = self.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = True
__lowerCamelCase = MraModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,
) = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = ()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCamelCase = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def lowerCamelCase ( self ):
'''simple docstring'''
return
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowerCamelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowerCamelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = 50265
__lowerCamelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
__lowerCamelCase = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = 50265
__lowerCamelCase = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 330
| 1
|
def a_ ( lowerCAmelCase_ : Any = 50 ):
__lowerCAmelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 353
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = DanceDiffusionPipeline
a_ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
a_ = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
a_ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
a_ = False
a_ = False
def lowercase ( self : List[Any] ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCAmelCase_ , use_timestep_embedding=lowerCAmelCase_ , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
__lowerCAmelCase = IPNDMScheduler()
__lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=0 ) -> Any:
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def lowercase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = DanceDiffusionPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__lowerCAmelCase = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowercase ( self : Union[str, Any] ) -> Tuple:
return super().test_save_load_local()
@skip_mps
def lowercase ( self : List[str] ) -> Dict:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowercase ( self : str ) -> List[str]:
return super().test_save_load_optional_components()
@skip_mps
def lowercase ( self : List[Any] ) -> List[str]:
return super().test_attention_slicing_forward_pass()
def lowercase ( self : str ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Any ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = torch_device
__lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowerCAmelCase = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self : Tuple ) -> Dict:
__lowerCAmelCase = torch_device
__lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowerCAmelCase = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 207
| 0
|
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def snake_case__ ( _A: int , _A: Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase = k_size // 2
lowerCAmelCase , lowerCAmelCase = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCAmelCase = 1 / (2 * pi * sigma) * exp(-(square(_A ) + square(_A )) / (2 * square(_A )) )
return g
def snake_case__ ( _A: List[str] , _A: Tuple , _A: Optional[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = image.shape[0], image.shape[1]
# dst image height and width
lowerCAmelCase = height - k_size + 1
lowerCAmelCase = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCAmelCase = zeros((dst_height * dst_width, k_size * k_size) )
lowerCAmelCase = 0
for i, j in product(range(_A ) , range(_A ) ):
lowerCAmelCase = ravel(image[i : i + k_size, j : j + k_size] )
lowerCAmelCase = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCAmelCase = gen_gaussian_kernel(_A , _A )
lowerCAmelCase = ravel(_A )
# reshape and get the dst image
lowerCAmelCase = dot(_A , _A ).reshape(_A , _A ).astype(_A )
return dst
if __name__ == "__main__":
# read original image
__lowercase = imread(R'''../image_data/lena.jpg''')
# turn image in gray scale value
__lowercase = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__lowercase = gaussian_filter(gray, 3, sigma=1)
__lowercase = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 272
|
'''simple docstring'''
from math import sqrt
def snake_case__ ( _A: int = 1000000 ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_A , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
| 272
| 1
|
import datasets
lowercase__ : Any = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
lowercase__ : Any = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
lowercase__ : int = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def __lowercase ( _a , _a ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _UpperCAmelCase ( datasets.Metric):
def _snake_case ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def _snake_case ( self : str , lowercase_ : Dict , lowercase_ : List[str] ):
return {"accuracy": simple_accuracy(__UpperCAmelCase , __UpperCAmelCase )}
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase__ : List[Any] = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Any = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = ['''CLIPFeatureExtractor''']
lowercase__ : Any = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 155
| 0
|
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 171
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_A = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( lowerCAmelCase ) -> Optional[int]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
if args.student_type == "roberta":
UpperCAmelCase__ : Optional[Any] = False
elif args.student_type == "gpt2":
UpperCAmelCase__ : Optional[int] = False
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Any:
if args.student_type == "roberta":
UpperCAmelCase__ : Tuple = False
def a__ ( ) -> int:
UpperCAmelCase__ : Dict = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=lowerCAmelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=lowerCAmelCase , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=lowerCAmelCase , type=lowerCAmelCase , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=lowerCAmelCase , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=lowerCAmelCase , required=lowerCAmelCase , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=lowerCAmelCase , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=lowerCAmelCase , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=lowerCAmelCase , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=lowerCAmelCase , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=lowerCAmelCase , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=lowerCAmelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=lowerCAmelCase , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=lowerCAmelCase , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=lowerCAmelCase , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=lowerCAmelCase , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=lowerCAmelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=lowerCAmelCase , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=lowerCAmelCase , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=lowerCAmelCase , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowerCAmelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=lowerCAmelCase , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=lowerCAmelCase , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=lowerCAmelCase , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=lowerCAmelCase , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=lowerCAmelCase , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=lowerCAmelCase , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=lowerCAmelCase , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=lowerCAmelCase , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=lowerCAmelCase , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=lowerCAmelCase , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=lowerCAmelCase , default=5_00 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=lowerCAmelCase , default=40_00 , help="""Checkpoint interval.""" )
UpperCAmelCase__ : List[Any] = parser.parse_args()
sanity_checks(lowerCAmelCase )
# ARGS #
init_gpu_params(lowerCAmelCase )
set_seed(lowerCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(lowerCAmelCase ) , lowerCAmelCase , indent=4 )
git_log(args.dump_path )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = MODEL_CLASSES[args.student_type]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase__ : List[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase__ : List[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase__ : List[Any] = tokenizer.all_special_tokens.index(lowerCAmelCase )
UpperCAmelCase__ : Tuple = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
UpperCAmelCase__ : Any = special_tok_ids
UpperCAmelCase__ : Any = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , """rb""" ) as fp:
UpperCAmelCase__ : List[str] = pickle.load(lowerCAmelCase )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , """rb""" ) as fp:
UpperCAmelCase__ : List[Any] = pickle.load(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = np.maximum(lowerCAmelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase__ : int = 0.0 # do not predict special tokens
UpperCAmelCase__ : str = torch.from_numpy(lowerCAmelCase )
else:
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : str = LmSeqsDataset(params=lowerCAmelCase , data=lowerCAmelCase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
UpperCAmelCase__ : List[str] = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase__ : List[Any] = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase__ : List[str] = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCAmelCase )
else:
UpperCAmelCase__ : List[Any] = student_model_class(lowerCAmelCase )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info("""Student loaded.""" )
# TEACHER #
UpperCAmelCase__ : str = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCAmelCase )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCAmelCase , lowerCAmelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCAmelCase , lowerCAmelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase__ : Optional[int] = Distiller(
params=lowerCAmelCase , dataset=lowerCAmelCase , token_probs=lowerCAmelCase , student=lowerCAmelCase , teacher=lowerCAmelCase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 171
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : int = {"vocab_file": "spiece.model"}
__lowerCAmelCase : Optional[Any] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class __A (_SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]="<s>" , UpperCAmelCase_ : List[str]="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : List[Any]="<sep>" , UpperCAmelCase_ : Dict="<pad>" , UpperCAmelCase_ : Optional[int]="<cls>" , UpperCAmelCase_ : str="<mask>" , UpperCAmelCase_ : Union[str, Any]=["<eop>", "<eod>"] , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : int , ) ->None:
"""simple docstring"""
snake_case_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
snake_case_ = 3
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
snake_case_ = jieba
snake_case_ = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Tuple , UpperCAmelCase_ : Dict ) ->int:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
if self.remove_space:
snake_case_ = """ """.join(inputs.strip().split() )
else:
snake_case_ = inputs
snake_case_ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
snake_case_ = unicodedata.normalize("""NFKD""" , UpperCAmelCase_ )
snake_case_ = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_ )] )
if self.do_lower_case:
snake_case_ = outputs.lower()
return outputs
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
snake_case_ = self.preprocess_text(UpperCAmelCase_ )
snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
snake_case_ = []
for piece in pieces:
if len(UpperCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
snake_case_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ = cur_pieces[1:]
else:
snake_case_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase_ )
else:
new_pieces.append(UpperCAmelCase_ )
return new_pieces
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Tuple ) ->Optional[int]:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Tuple ) ->int:
"""simple docstring"""
snake_case_ = """""".join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , """ """ ).strip()
return out_string
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1]
return ([0] * len(UpperCAmelCase_ )) + [1, 1]
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def lowerCAmelCase ( self : Optional[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = super()._decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 351
|
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , _SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
snake_case_ , snake_case_ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 233
| 0
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 1_6
lowerCAmelCase_ = 3_2
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 16 ) -> Optional[int]:
"""simple docstring"""
snake_case_ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case_ : str = DatasetDict(
{
'''train''': dataset['''train'''].select(_UpperCamelCase ),
'''validation''': dataset['''train'''].select(_UpperCamelCase ),
'''test''': dataset['''validation'''],
} )
def tokenize_function(_UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_UpperCamelCase , max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : Any = datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : Dict = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
snake_case_ : Union[str, Any] = 8
else:
snake_case_ : Optional[Any] = None
return tokenizer.pad(
_UpperCamelCase , padding='''longest''' , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
snake_case_ : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
snake_case_ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
snake_case_ : Optional[int] = DataLoader(
tokenized_datasets['''test'''] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader, test_dataloader
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : Union[str, Any] = []
# Download the dataset
snake_case_ : int = load_dataset('''glue''' , '''mrpc''' )
# Create our splits
snake_case_ : int = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
snake_case_ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : str = config['''lr''']
snake_case_ : Union[str, Any] = int(config['''num_epochs'''] )
snake_case_ : Any = int(config['''seed'''] )
snake_case_ : Any = int(config['''batch_size'''] )
snake_case_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
snake_case_ : List[str] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case_ : List[str] = batch_size // MAX_GPU_BATCH_SIZE
snake_case_ : str = MAX_GPU_BATCH_SIZE
set_seed(_UpperCamelCase )
# New Code #
# Create our folds:
snake_case_ : List[str] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
snake_case_ : List[str] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_UpperCamelCase ):
snake_case_ , snake_case_ , snake_case_ : Tuple = get_fold_dataloaders(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : Dict = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : int = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : Any = AdamW(params=model.parameters() , lr=_UpperCamelCase )
# Instantiate scheduler
snake_case_ : List[Any] = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Now we train the model
for epoch in range(_UpperCamelCase ):
model.train()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : List[str] = model(**_UpperCamelCase )
snake_case_ : int = outputs.loss
snake_case_ : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(_UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : str = model(**_UpperCamelCase )
snake_case_ : List[str] = outputs.logits.argmax(dim=-1 )
snake_case_ , snake_case_ : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_UpperCamelCase , references=_UpperCamelCase , )
snake_case_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _UpperCamelCase )
# New Code #
# We also run predictions on the test set at the very end
snake_case_ : Optional[int] = []
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : List[Any] = model(**_UpperCamelCase )
snake_case_ : Tuple = outputs.logits
snake_case_ , snake_case_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_UpperCamelCase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
snake_case_ : List[str] = torch.cat(_UpperCamelCase , dim=0 )
snake_case_ : Optional[int] = torch.stack(_UpperCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
snake_case_ : Optional[Any] = metric.compute(predictions=_UpperCamelCase , references=_UpperCamelCase )
accelerator.print('''Average test metrics from all folds:''' , _UpperCamelCase )
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
snake_case_ : List[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_UpperCamelCase , default=_UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=_UpperCamelCase , default=3 , help='''The number of splits to perform across the dataset''' )
snake_case_ : Optional[Any] = parser.parse_args()
snake_case_ : Dict = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main()
| 279
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a__ ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ = 1
lowerCamelCase_ = 3
lowerCamelCase_ = (32, 32)
lowerCamelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ )
return image
@property
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
return CLIPTextModel(A_ )
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.dummy_cond_unet_upscale
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = DDIMScheduler(prediction_type='v_prediction' )
lowerCamelCase_ = self.dummy_vae
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCamelCase_ = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=350 , )
lowerCamelCase_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(0 )
lowerCamelCase_ = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCamelCase_ = output.images
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(0 )
lowerCamelCase_ = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=A_ , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
lowerCamelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCamelCase_ = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.dummy_cond_unet_upscale
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = DDIMScheduler(prediction_type='v_prediction' )
lowerCamelCase_ = self.dummy_vae
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCamelCase_ = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=350 , )
lowerCamelCase_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCamelCase_ = output.images
assert image.shape[0] == 2
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(0 )
lowerCamelCase_ = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCamelCase_ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.dummy_cond_unet_upscale
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = DDIMScheduler(prediction_type='v_prediction' )
lowerCamelCase_ = self.dummy_vae
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowerCamelCase_ = unet.half()
lowerCamelCase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=350 , )
lowerCamelCase_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
lowerCamelCase_ = 'A painting of a squirrel eating a burger'
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_inference_steps=2 , output_type='np' , ).images
lowerCamelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
lowerCamelCase_ = 'stabilityai/stable-diffusion-x4-upscaler'
lowerCamelCase_ = StableDiffusionUpscalePipeline.from_pretrained(A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
lowerCamelCase_ = 'a cat sitting on a park bench'
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
lowerCamelCase_ = 'stabilityai/stable-diffusion-x4-upscaler'
lowerCamelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
A_ , torch_dtype=torch.floataa , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
lowerCamelCase_ = 'a cat sitting on a park bench'
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type='np' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCamelCase_ = 'stabilityai/stable-diffusion-x4-upscaler'
lowerCamelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
A_ , torch_dtype=torch.floataa , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = 'a cat sitting on a park bench'
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = pipe(
prompt=A_ , image=A_ , generator=A_ , num_inference_steps=5 , output_type='np' , )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 204
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ (lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = StableDiffusionInpaintPipeline
_lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCAmelCase = frozenset([] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
A_ : Optional[Any] = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
A_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
A_ : Union[str, Any] = CLIPTextModel(a__ )
A_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any]=0 ):
"""simple docstring"""
A_ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
A_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ : int = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
A_ : Dict = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(a__ ).startswith('''mps''' ):
A_ : Union[str, Any] = torch.manual_seed(a__ )
else:
A_ : Optional[int] = torch.Generator(device=a__ ).manual_seed(a__ )
A_ : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : Any = self.get_dummy_components()
A_ : Tuple = StableDiffusionInpaintPipeline(**a__ )
A_ : List[str] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
A_ : List[Any] = self.get_dummy_inputs(a__ )
A_ : List[Any] = sd_pipe(**a__ ).images
A_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : int = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self : Tuple ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : str ):
"""simple docstring"""
A_ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
A_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
A_ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
A_ : str = '''stabilityai/stable-diffusion-2-inpainting'''
A_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
A_ : Union[str, Any] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
A_ : List[Any] = torch.manual_seed(0 )
A_ : Union[str, Any] = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
A_ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
A_ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
A_ : Union[str, Any] = '''stabilityai/stable-diffusion-2-inpainting'''
A_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
A_ : List[str] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
A_ : List[str] = torch.manual_seed(0 )
A_ : Union[str, Any] = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
A_ : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _a ( self : str ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
A_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
A_ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
A_ : Any = PNDMScheduler.from_pretrained(a__ , subfolder='''scheduler''' )
A_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A_ : List[Any] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Dict = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
A_ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 363
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'speech_to_text_2'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ):
"""simple docstring"""
A_ : Optional[int] = vocab_size
A_ : Tuple = d_model
A_ : List[str] = decoder_ffn_dim
A_ : str = decoder_layers
A_ : Any = decoder_attention_heads
A_ : int = dropout
A_ : str = attention_dropout
A_ : Optional[int] = activation_dropout
A_ : str = activation_function
A_ : List[Any] = init_std
A_ : Union[str, Any] = decoder_layerdrop
A_ : Any = use_cache
A_ : Optional[Any] = decoder_layers
A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Optional[Any] = max_target_positions
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
| 4
| 0
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
A__ = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
A__ = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _UpperCAmelCase ( snake_case , snake_case=False ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = create_model(
"""HTSAT-tiny""" , """roberta""" , snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=snake_case , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = {}
_lowerCAmelCase = R""".*sequential.(\d+).*"""
_lowerCAmelCase = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_lowerCAmelCase = key.replace(snake_case , snake_case )
if re.match(snake_case , snake_case ):
# replace sequential layers with list
_lowerCAmelCase = re.match(snake_case , snake_case ).group(1 )
_lowerCAmelCase = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case )//3}.linear.' )
elif re.match(snake_case , snake_case ):
_lowerCAmelCase = int(re.match(snake_case , snake_case ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_lowerCAmelCase = 1 if projecton_layer == 0 else 2
_lowerCAmelCase = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
_lowerCAmelCase = value
_lowerCAmelCase = mixed_qkv.size(0 ) // 3
_lowerCAmelCase = mixed_qkv[:qkv_dim]
_lowerCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
_lowerCAmelCase = mixed_qkv[qkv_dim * 2 :]
_lowerCAmelCase = query_layer
_lowerCAmelCase = key_layer
_lowerCAmelCase = value_layer
else:
_lowerCAmelCase = value
return model_state_dict
def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case=False ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = init_clap(snake_case , enable_fusion=snake_case )
clap_model.eval()
_lowerCAmelCase = clap_model.state_dict()
_lowerCAmelCase = rename_state_dict(snake_case )
_lowerCAmelCase = ClapConfig()
_lowerCAmelCase = enable_fusion
_lowerCAmelCase = ClapModel(snake_case )
# ignore the spectrogram embedding layer
model.load_state_dict(snake_case , strict=snake_case )
model.save_pretrained(snake_case )
transformers_config.save_pretrained(snake_case )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
A__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 82
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , lowercase_ ):
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = load_tool("text-classification" )
self.tool.setup()
snake_case_ = load_tool("text-classification" , remote=a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
| 85
| 0
|
from copy import deepcopy
class lowerCamelCase :
def __init__(self : Any , _A : list[int] | None = None , _A : int | None = None ) -> None:
if arr is None and size is not None:
snake_case = size
snake_case = [0] * size
elif arr is not None:
self.init(_A )
else:
raise ValueError("Either arr or size must be specified" )
def UpperCAmelCase(self : str , _A : list[int] ) -> None:
snake_case = len(_A )
snake_case = deepcopy(_A )
for i in range(1 , self.size ):
snake_case = self.next_(_A )
if j < self.size:
self.tree[j] += self.tree[i]
def UpperCAmelCase(self : Optional[int] ) -> list[int]:
snake_case = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case = self.next_(_A )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def UpperCAmelCase(_A : int ) -> int:
return index + (index & (-index))
@staticmethod
def UpperCAmelCase(_A : int ) -> int:
return index - (index & (-index))
def UpperCAmelCase(self : Optional[int] , _A : int , _A : int ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case = self.next_(_A )
def UpperCAmelCase(self : Union[str, Any] , _A : int , _A : int ) -> None:
self.add(_A , value - self.get(_A ) )
def UpperCAmelCase(self : Optional[Any] , _A : int ) -> int:
if right == 0:
return 0
snake_case = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case = self.prev(_A )
return result
def UpperCAmelCase(self : List[Any] , _A : int , _A : int ) -> int:
return self.prefix(_A ) - self.prefix(_A )
def UpperCAmelCase(self : Tuple , _A : int ) -> int:
return self.query(_A , index + 1 )
def UpperCAmelCase(self : Optional[Any] , _A : int ) -> int:
value -= self.tree[0]
if value < 0:
return -1
snake_case = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 137
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowercase_ ( A__ ) -> str:
"""simple docstring"""
return getitem, k
def lowercase_ ( A__ , A__ ) -> str:
"""simple docstring"""
return setitem, k, v
def lowercase_ ( A__ ) -> List[Any]:
"""simple docstring"""
return delitem, k
def lowercase_ ( A__ , A__ , *A__ ) -> str:
"""simple docstring"""
try:
return fun(A__ , *A__ ), None
except Exception as e:
return None, e
_A = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
_A = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
_A = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
_A = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
_A = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_A = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def lowercase_ ( A__ ) -> List[Any]:
"""simple docstring"""
snake_case = HashMap(initial_block_size=4 )
snake_case = {}
for _, (fun, *args) in enumerate(A__ ):
snake_case , snake_case = _run_operation(A__ , A__ , *A__ )
snake_case , snake_case = _run_operation(A__ , A__ , *A__ )
assert my_res == py_res
assert str(A__ ) == str(A__ )
assert set(A__ ) == set(A__ )
assert len(A__ ) == len(A__ )
assert set(my.items() ) == set(py.items() )
def lowercase_ ( ) -> Optional[int]:
"""simple docstring"""
def is_public(A__ ) -> bool:
return not name.startswith("_" )
snake_case = {name for name in dir({} ) if is_public(A__ )}
snake_case = {name for name in dir(HashMap() ) if is_public(A__ )}
assert dict_public_names > hash_public_names
| 137
| 1
|
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(__SCREAMING_SNAKE_CASE , '_dynamo' ):
return False
return isinstance(__SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool = True ):
lowercase_ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowercase_ : Optional[int] = is_compiled_module(__SCREAMING_SNAKE_CASE )
if is_compiled:
lowercase_ : Optional[Any] = model
lowercase_ : Tuple = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = model.module
if not keep_fpaa_wrapper:
lowercase_ : str = getattr(__SCREAMING_SNAKE_CASE , 'forward' )
lowercase_ : int = model.__dict__.pop('_original_forward' , __SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(__SCREAMING_SNAKE_CASE , '__wrapped__' ):
lowercase_ : Optional[int] = forward.__wrapped__
if forward == original_forward:
break
lowercase_ : int = forward
if getattr(__SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , __SCREAMING_SNAKE_CASE ):
convert_model(__SCREAMING_SNAKE_CASE , to_transformer_engine=__SCREAMING_SNAKE_CASE )
if is_compiled:
lowercase_ : List[Any] = model
lowercase_ : Optional[Any] = compiled_model
return model
def lowercase__( ):
PartialState().wait_for_everyone()
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@contextmanager
def lowercase__( **__SCREAMING_SNAKE_CASE : Optional[Any] ):
for key, value in kwargs.items():
lowercase_ : List[Any] = str(__SCREAMING_SNAKE_CASE )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
if not hasattr(__SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(__SCREAMING_SNAKE_CASE , '__name__' ):
lowercase_ : List[str] = getattr(__SCREAMING_SNAKE_CASE , '__class__' , __SCREAMING_SNAKE_CASE )
if hasattr(__SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(__SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ):
for key, value in source.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : str = destination.setdefault(__SCREAMING_SNAKE_CASE , {} )
merge_dicts(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
lowercase_ : List[str] = value
return destination
def lowercase__( __SCREAMING_SNAKE_CASE : int = None ):
if port is None:
lowercase_ : Optional[int] = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 213
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__SCREAMING_SNAKE_CASE =None
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__SCREAMING_SNAKE_CASE ={
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__SCREAMING_SNAKE_CASE =["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ['input_ids', 'attention_mask']
lowercase = MBartTokenizer
lowercase = []
lowercase = []
def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> List[str]:
'''simple docstring'''
lowercase_ : str = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else mask_token
super().__init__(
vocab_file=__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,src_lang=__UpperCamelCase ,tgt_lang=__UpperCamelCase ,additional_special_tokens=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : str = vocab_file
lowercase_ : Optional[Any] = False if not self.vocab_file else True
lowercase_ : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowercase_ : List[Any] = {
lang_code: self.convert_tokens_to_ids(__UpperCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowercase_ : Dict = src_lang if src_lang is not None else 'en_XX'
lowercase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
lowercase_ : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : Optional[int] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : List[Any] = [self.sep_token_id]
lowercase_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowercase_ : Dict = src_lang
lowercase_ : List[Any] = self(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,return_tensors=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : Optional[Any] = self.convert_tokens_to_ids(__UpperCamelCase )
lowercase_ : Dict = tgt_lang_id
return inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = "en_XX" ,__UpperCamelCase = None ,__UpperCamelCase = "ro_RO" ,**__UpperCamelCase ,) -> BatchEncoding:
'''simple docstring'''
lowercase_ : Union[str, Any] = src_lang
lowercase_ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : Any = self.convert_tokens_to_ids(__UpperCamelCase )
lowercase_ : Optional[Any] = []
lowercase_ : int = [self.eos_token_id, self.cur_lang_code]
lowercase_ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : Optional[int] = self.convert_tokens_to_ids(__UpperCamelCase )
lowercase_ : str = []
lowercase_ : Dict = [self.eos_token_id, self.cur_lang_code]
lowercase_ : str = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : int = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
lowercase_ : Tuple = os.path.join(
__UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file ,__UpperCamelCase )
return (out_vocab_file,)
| 213
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
lowercase : int = {"""vocab_file""": """spiece.model"""}
lowercase : Optional[int] = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
lowercase : Union[str, Any] = {"""bert_for_seq_generation""": 5_1_2}
class A__ ( UpperCamelCase__ ):
"""simple docstring"""
__A : List[str] = VOCAB_FILES_NAMES
__A : Dict = PRETRAINED_VOCAB_FILES_MAP
__A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : List[int] = []
__A : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<::::>" , lowercase = None , **lowercase , ) -> None:
'''simple docstring'''
a__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
a__ : str = vocab_file
a__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCamelCase_)
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Dict:
'''simple docstring'''
a__ : Any = self.__dict__.copy()
a__ : Tuple = None
return state
def __setstate__( self , lowercase) -> str:
'''simple docstring'''
a__ : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
a__ : Union[str, Any] = {}
a__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __lowercase ( self , lowercase) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_)
def __lowercase ( self , lowercase) -> List[str]:
'''simple docstring'''
return self.sp_model.piece_to_id(UpperCamelCase_)
def __lowercase ( self , lowercase) -> List[Any]:
'''simple docstring'''
a__ : Dict = self.sp_model.IdToPiece(UpperCamelCase_)
return token
def __lowercase ( self , lowercase) -> int:
'''simple docstring'''
a__ : List[Any] = []
a__ : Union[str, Any] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase_) + token
a__ : Optional[int] = []
else:
current_sub_tokens.append(UpperCamelCase_)
out_string += self.sp_model.decode(UpperCamelCase_)
return out_string.strip()
def __lowercase ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
a__ : Dict = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCamelCase_ , 'wb') as fi:
a__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_)
return (out_vocab_file,)
| 355
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase : str = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 225
| 0
|
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : Any , UpperCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = data
lowercase__ = [0x6_7_4_5_2_3_0_1, 0xE_F_C_D_A_B_8_9, 0x9_8_B_A_D_C_F_E, 0x1_0_3_2_5_4_7_6, 0xC_3_D_2_E_1_F_0]
@staticmethod
def UpperCamelCase__ (UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0xF_F_F_F_F_F_F_F
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def UpperCamelCase__ (self : Tuple , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = list(struct.unpack('''>16L''' , UpperCamelCase ) ) + [0] * 64
for i in range(16 , 80 ):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(UpperCamelCase )
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0x5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0x6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0x8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0xC_A_6_2_C_1_D_6
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = (
self.rotate(UpperCamelCase , 5 ) + f + e + k + expanded_block[i] & 0xF_F_F_F_F_F_F_F,
a,
self.rotate(UpperCamelCase , 30 ),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0xF_F_F_F_F_F_F_F,
self.h[1] + b & 0xF_F_F_F_F_F_F_F,
self.h[2] + c & 0xF_F_F_F_F_F_F_F,
self.h[3] + d & 0xF_F_F_F_F_F_F_F,
self.h[4] + e & 0xF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h )
def _SCREAMING_SNAKE_CASE () -> Optional[Any]:
"""simple docstring"""
lowercase__ = b'''Test String'''
assert SHAaHash(A ).final_hash() == hashlib.shaa(A ).hexdigest() # noqa: S324
def _SCREAMING_SNAKE_CASE () -> List[Any]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A , '''utf-8''' )
print(SHAaHash(A ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 2
|
'''simple docstring'''
import numpy as np
def UpperCamelCase( UpperCAmelCase_ ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def UpperCamelCase_ ( A__ : Sequence[float] , A__ : int , A__ : int ):
'''simple docstring'''
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
lowerCAmelCase_ : Any = (low + high) // 2
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = max_subarray(A__ , A__ , A__ )
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = max_subarray(A__ , mid + 1 , A__ )
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : int = max_cross_sum(A__ , A__ , A__ , A__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def UpperCamelCase_ ( A__ : Sequence[float] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
lowerCAmelCase_, lowerCAmelCase_ : Any = float("""-inf""" ), -1
lowerCAmelCase_, lowerCAmelCase_ : Any = float("""-inf""" ), -1
lowerCAmelCase_ : int | float = 0
for i in range(A__ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
lowerCAmelCase_ : Any = summ
lowerCAmelCase_ : str = i
lowerCAmelCase_ : List[str] = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
lowerCAmelCase_ : List[Any] = summ
lowerCAmelCase_ : Tuple = i
return max_left, max_right, (left_sum + right_sum)
def UpperCamelCase_ ( A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = [randint(1 , A__ ) for _ in range(A__ )]
lowerCAmelCase_ : List[str] = time.time()
max_subarray(A__ , 0 , input_size - 1 )
lowerCAmelCase_ : str = time.time()
return end - start
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : str = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
lowerCAmelCase_ : Tuple = [time_max_subarray(A__ ) for input_size in input_sizes]
print("""No of Inputs\t\tTime Taken""" )
for input_size, runtime in zip(A__ , A__ ):
print(A__ , """\t\t""" , A__ )
plt.plot(A__ , A__ )
plt.xlabel("""Number of Inputs""" )
plt.ylabel("""Time taken in seconds""" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 89
|
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[str] = logging.get_logger(__name__)
def UpperCamelCase_ ( A__ : List[str] ):
'''simple docstring'''
print("""Loading config file...""" )
def flatten_yaml_as_dict(A__ : int , A__ : str="" , A__ : Dict="." ):
lowerCAmelCase_ : int = []
for k, v in d.items():
lowerCAmelCase_ : Any = parent_key + sep + k if parent_key else k
if isinstance(A__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(A__ , A__ , sep=A__ ).items() )
else:
items.append((new_key, v) )
return dict(A__ )
lowerCAmelCase_ : Optional[int] = argparse.Namespace()
with open(A__ , """r""" ) as yaml_file:
try:
lowerCAmelCase_ : Any = yaml.load(A__ , Loader=yaml.FullLoader )
lowerCAmelCase_ : Any = flatten_yaml_as_dict(A__ )
for k, v in flat_cfg.items():
setattr(A__ , A__ , A__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(A__ , str(A__ ) ) )
return config
def UpperCamelCase_ ( A__ : Optional[int] , A__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = MobileViTVaConfig()
lowerCAmelCase_ : Any = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
lowerCAmelCase_ : Tuple = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
lowerCAmelCase_ : List[str] = 3_84
else:
lowerCAmelCase_ : Optional[int] = 2_56
lowerCAmelCase_ : int = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
lowerCAmelCase_ : int = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
lowerCAmelCase_ : int = 3_84
else:
lowerCAmelCase_ : int = 2_56
lowerCAmelCase_ : Optional[Any] = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
lowerCAmelCase_ : Dict = 1_51
lowerCAmelCase_ : Any = 5_12
lowerCAmelCase_ : int = """ade20k-id2label.json"""
lowerCAmelCase_ : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
lowerCAmelCase_ : Any = 21
lowerCAmelCase_ : List[str] = 5_12
lowerCAmelCase_ : Union[str, Any] = """pascal-voc-id2label.json"""
lowerCAmelCase_ : int = True
# orig_config
lowerCAmelCase_ : Optional[int] = load_orig_config_file(A__ )
assert getattr(A__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
lowerCAmelCase_ : Union[str, Any] = getattr(A__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(A__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCAmelCase_ : List[Any] = getattr(A__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowerCAmelCase_ : List[str] = getattr(A__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
lowerCAmelCase_ : Optional[int] = getattr(A__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
lowerCAmelCase_ : Optional[Any] = getattr(A__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
lowerCAmelCase_ : Optional[int] = getattr(A__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
lowerCAmelCase_ : Any = """huggingface/label-files"""
lowerCAmelCase_ : Any = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : Any = {int(A__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[int] = idalabel
lowerCAmelCase_ : Tuple = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase_ ( A__ : int , A__ : Dict , A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = dct.pop(A__ )
lowerCAmelCase_ : Optional[int] = val
def UpperCamelCase_ ( A__ : int , A__ : int=False ):
'''simple docstring'''
if base_model:
lowerCAmelCase_ : List[Any] = """"""
else:
lowerCAmelCase_ : Any = """mobilevitv2."""
lowerCAmelCase_ : int = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCAmelCase_ : List[Any] = k[8:]
else:
lowerCAmelCase_ : Optional[Any] = k
if ".block." in k:
lowerCAmelCase_ : Dict = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
lowerCAmelCase_ : Any = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
lowerCAmelCase_ : List[Any] = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
lowerCAmelCase_ : Any = k_new.replace("""conv_1.""" , f'{model_prefix}conv_stem.' )
for i in [1, 2]:
if f'layer_{i}.' in k:
lowerCAmelCase_ : Optional[Any] = k_new.replace(f'layer_{i}.' , f'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
lowerCAmelCase_ : Optional[int] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
lowerCAmelCase_ : List[str] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if f'layer_{i}.0.' in k:
lowerCAmelCase_ : Optional[int] = k_new.replace(f'layer_{i}.0.' , f'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if f'layer_{i}.1.local_rep.0.' in k:
lowerCAmelCase_ : str = k_new.replace(f'layer_{i}.1.local_rep.0.' , f'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if f'layer_{i}.1.local_rep.1.' in k:
lowerCAmelCase_ : Optional[Any] = k_new.replace(f'layer_{i}.1.local_rep.1.' , f'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
lowerCAmelCase_ : Optional[Any] = [0, 1]
elif i == 4:
lowerCAmelCase_ : Any = [0, 1, 2, 3]
elif i == 5:
lowerCAmelCase_ : Any = [0, 1, 2]
for j in j_in:
if f'layer_{i}.1.global_rep.{j}.' in k:
lowerCAmelCase_ : Optional[int] = k_new.replace(
f'layer_{i}.1.global_rep.{j}.' , f'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if f'layer_{i}.1.global_rep.{j+1}.' in k:
lowerCAmelCase_ : Optional[Any] = k_new.replace(
f'layer_{i}.1.global_rep.{j+1}.' , f'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if f'layer_{i}.1.conv_proj.' in k:
lowerCAmelCase_ : str = k_new.replace(f'layer_{i}.1.conv_proj.' , f'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
lowerCAmelCase_ : Dict = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
lowerCAmelCase_ : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
lowerCAmelCase_ : Any = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
lowerCAmelCase_ : int = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
lowerCAmelCase_ : List[Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
lowerCAmelCase_ : Optional[int] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
lowerCAmelCase_ : Optional[Any] = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
lowerCAmelCase_ : Dict = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
lowerCAmelCase_ : List[Any] = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def UpperCamelCase_ ( A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(A__ )
for k in keys_to_ignore:
state_dict.pop(A__ , A__ )
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCAmelCase_ : Optional[Any] = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( A__ : Dict , A__ : Union[str, Any] , A__ : str , A__ : str ):
'''simple docstring'''
lowerCAmelCase_ : int = get_mobilevitva_config(A__ , A__ )
# load original state_dict
lowerCAmelCase_ : int = torch.load(A__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
lowerCAmelCase_ : Union[str, Any] = MobileViTVaForSemanticSegmentation(A__ ).eval()
lowerCAmelCase_ : Union[str, Any] = False
else:
lowerCAmelCase_ : List[Any] = MobileViTVaForImageClassification(A__ ).eval()
lowerCAmelCase_ : Optional[int] = False
# remove and rename some keys of load the original model
lowerCAmelCase_ : Tuple = checkpoint
remove_unused_keys(A__ )
lowerCAmelCase_ : List[Any] = create_rename_keys(A__ , base_model=A__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(A__ , A__ , A__ )
# load modified state_dict
model.load_state_dict(A__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCAmelCase_ : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCAmelCase_ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase_ : int = model(**A__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
lowerCAmelCase_ : Optional[Any] = outputs.logits
lowerCAmelCase_ : Tuple = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCAmelCase_ : int = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
assert torch.allclose(logits[0, :3] , A__ , atol=1E-4 )
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
__A : Union[str, Any] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 89
| 1
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
__UpperCamelCase : Any = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.1_5},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
__UpperCamelCase : List[Any] = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__UpperCamelCase : Dict = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__UpperCamelCase : List[str] = '''allenai'''
def __SCREAMING_SNAKE_CASE ( A_ ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCAmelCase__ : int = dict((re.sub(r'''@@$''' , '''''' , A_ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , A_ ), v) for k, v in d.items() )
lowerCAmelCase__ : List[str] = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f'{k}</w>']
lowerCAmelCase__ : int = d[k] # restore
return da
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
# prep
assert os.path.exists(A_ )
os.makedirs(A_ , exist_ok=A_ )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
lowerCAmelCase__ : int = basename(A_ )
lowerCAmelCase__ : Tuple = dirname(A_ )
lowerCAmelCase__ : Optional[int] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCAmelCase__ : Tuple = cls.hub_models()
lowerCAmelCase__ : Any = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
lowerCAmelCase__ : Optional[Any] = '''.'''
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'using checkpoint {checkpoint_file}' )
lowerCAmelCase__ : str = hub_utils.from_pretrained(
A_ , A_ , A_ , archive_map=A_ , **A_ )
lowerCAmelCase__ : Optional[int] = vars(chkpt['''args''']['''model'''] )
lowerCAmelCase__ : Any = args['''source_lang''']
lowerCAmelCase__ : int = args['''target_lang''']
lowerCAmelCase__ : Optional[Any] = dirname(A_ )
lowerCAmelCase__ : Tuple = basename(A_ )
# dicts
lowerCAmelCase__ : Tuple = os.path.join(A_ , f'dict.{src_lang}.txt' )
lowerCAmelCase__ : int = os.path.join(A_ , f'dict.{tgt_lang}.txt' )
lowerCAmelCase__ : str = Dictionary.load(A_ )
lowerCAmelCase__ : Any = rewrite_dict_keys(src_dict.indices )
lowerCAmelCase__ : Optional[Any] = len(A_ )
lowerCAmelCase__ : Tuple = os.path.join(A_ , '''vocab-src.json''' )
print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(A_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(A_ , ensure_ascii=A_ , indent=A_ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCAmelCase__ : Union[str, Any] = True
for k in src_vocab.keys():
if not k.islower():
lowerCAmelCase__ : Union[str, Any] = False
break
lowerCAmelCase__ : Optional[int] = Dictionary.load(A_ )
lowerCAmelCase__ : List[str] = rewrite_dict_keys(tgt_dict.indices )
lowerCAmelCase__ : str = len(A_ )
lowerCAmelCase__ : Optional[int] = os.path.join(A_ , '''vocab-tgt.json''' )
print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(A_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(A_ , ensure_ascii=A_ , indent=A_ ) )
# merges_file (bpecodes)
lowerCAmelCase__ : int = os.path.join(A_ , VOCAB_FILES_NAMES['''merges_file'''] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCAmelCase__ : Union[str, Any] = os.path.join(A_ , A_ )
if os.path.exists(A_ ):
break
with open(A_ , encoding='''utf-8''' ) as fin:
lowerCAmelCase__ : Optional[Any] = fin.read()
lowerCAmelCase__ : Optional[Any] = re.sub(r''' \d+$''' , '''''' , A_ , 0 , re.M ) # remove frequency number
print(f'Generating {merges_file}' )
with open(A_ , '''w''' , encoding='''utf-8''' ) as fout:
fout.write(A_ )
# model config
lowerCAmelCase__ : List[str] = os.path.join(A_ , '''config.json''' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", f'need to extend tokenizer to support bpe={args["tokenizer"]}'
lowerCAmelCase__ : List[str] = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.02,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
lowerCAmelCase__ : List[str] = 5
lowerCAmelCase__ : List[str] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCAmelCase__ : Tuple = best_score_hparams[model_dir]['''length_penalty''']
else:
lowerCAmelCase__ : int = 1.0
print(f'Generating {fsmt_model_config_file}' )
with open(A_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(A_ , ensure_ascii=A_ , indent=A_ ) )
# tokenizer config
lowerCAmelCase__ : Union[str, Any] = os.path.join(A_ , A_ )
lowerCAmelCase__ : List[Any] = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 10_24,
'''do_lower_case''': do_lower_case,
}
print(f'Generating {fsmt_tokenizer_config_file}' )
with open(A_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(A_ , ensure_ascii=A_ , indent=A_ ) )
# model
lowerCAmelCase__ : Optional[int] = chkpt['''models'''][0]
lowerCAmelCase__ : List[str] = model.state_dict()
# rename keys to start with 'model.'
lowerCAmelCase__ : Any = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCAmelCase__ : str = [
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(A_ , A_ )
lowerCAmelCase__ : Optional[int] = FSMTConfig.from_pretrained(A_ )
lowerCAmelCase__ : Any = FSMTForConditionalGeneration(A_ )
# check that it loads ok
model_new.load_state_dict(A_ , strict=A_ )
# save
lowerCAmelCase__ : str = os.path.join(A_ , A_ )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(A_ , A_ )
print('''Conversion is done!''' )
print('''\nLast step is to upload the files to s3''' )
print(f'cd {data_root}' )
print(f'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 106
|
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : list ):
if not nums:
raise ValueError('List is empty' )
return sum(__SCREAMING_SNAKE_CASE ) / len(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 213
| 0
|
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : int = len(__a ) + 1
lowerCAmelCase__ : List[str] = len(__a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
lowerCAmelCase__ : Optional[int] = [[0 for i in range(__a )] for j in range(__a )]
# since string of zero length match pattern of zero length
lowerCAmelCase__ : str = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __a ):
lowerCAmelCase__ : Optional[Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __a ):
lowerCAmelCase__ : Dict = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __a ):
for j in range(1 , __a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
lowerCAmelCase__ : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
lowerCAmelCase__ : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
lowerCAmelCase__ : int = dp[i - 1][j]
else:
lowerCAmelCase__ : Any = 0
else:
lowerCAmelCase__ : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowerCamelCase = '''aab'''
lowerCamelCase = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 355
|
from math import isqrt
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : Dict = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _a , _a ):
lowerCAmelCase__ : int = False
return [i for i in range(2 , _a ) if is_prime[i]]
def lowerCamelCase_ ( _a = 10**8 ):
"""simple docstring"""
lowerCAmelCase__ : Any = calculate_prime_numbers(max_number // 2 )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Optional[int] = len(_a ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 211
| 0
|
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
_a = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def lowerCamelCase__ ( __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = list(s_dict.keys() )
for key in keys:
_UpperCamelCase = r'''.*/layers_(\d+)'''
_UpperCamelCase = key
if re.match(__snake_case, __snake_case ):
_UpperCamelCase = re.sub(r'''layers_(\d+)''', r'''block/\1/layer''', __snake_case )
_UpperCamelCase = r'''(encoder|decoder)\/'''
if re.match(__snake_case, __snake_case ):
_UpperCamelCase = re.match(__snake_case, __snake_case ).groups()
if groups[0] == "encoder":
_UpperCamelCase = re.sub(r'''/mlp/''', r'''/1/mlp/''', __snake_case )
_UpperCamelCase = re.sub(r'''/pre_mlp_layer_norm/''', r'''/1/layer_norm/''', __snake_case )
elif groups[0] == "decoder":
_UpperCamelCase = re.sub(r'''/mlp/''', r'''/2/mlp/''', __snake_case )
_UpperCamelCase = re.sub(r'''/pre_mlp_layer_norm/''', r'''/2/layer_norm/''', __snake_case )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_UpperCamelCase = new_key.replace(__snake_case, __snake_case )
print(F'''{key} -> {new_key}''' )
_UpperCamelCase = s_dict.pop(__snake_case )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCamelCase = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCamelCase = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_UpperCamelCase = s_dict[key].shape[0]
_UpperCamelCase = s_dict[key]
for idx in range(__snake_case ):
_UpperCamelCase = expert_weihts[idx]
print(F'''{key} -> {key.replace("expert/", "nested fstring" )}''' )
s_dict.pop(__snake_case )
return s_dict
_a = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Any:
"""simple docstring"""
import regex as re
with open(__snake_case, '''r''' ) as f:
_UpperCamelCase = f.read()
_UpperCamelCase = re.findall(r'''(.*) = ([0-9.]*)''', __snake_case )
_UpperCamelCase = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_UpperCamelCase = float(__snake_case ) if '''.''' in value else int(__snake_case )
_UpperCamelCase = re.findall(r'''(.*activations) = \(\'(.*)\',\)''', __snake_case )[0]
_UpperCamelCase = str(activation[1] )
_UpperCamelCase = num_experts
_UpperCamelCase = SwitchTransformersConfig(**__snake_case )
return config
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=None, __snake_case="./", __snake_case=8 ) -> List[Any]:
"""simple docstring"""
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
_UpperCamelCase = checkpoints.load_tax_checkpoint(__snake_case )
if gin_file is not None:
_UpperCamelCase = convert_gin_to_config(__snake_case, __snake_case )
else:
_UpperCamelCase = SwitchTransformersConfig.from_pretrained(__snake_case )
_UpperCamelCase = SwitchTransformersForConditionalGeneration(__snake_case )
_UpperCamelCase = flax_params['''target''']
_UpperCamelCase = flatten_dict(__snake_case, sep='''/''' )
_UpperCamelCase = rename_keys(__snake_case )
_UpperCamelCase = unflatten_dict(__snake_case, sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__snake_case, __snake_case )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
_a = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 194
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
_UpperCamelCase = ksize + 1
_UpperCamelCase = np.zeros((ksize, ksize), dtype=np.floataa )
# each value
for y in range(__snake_case ):
for x in range(__snake_case ):
# distance from center
_UpperCamelCase = x - ksize // 2
_UpperCamelCase = y - ksize // 2
# degree to radiant
_UpperCamelCase = theta / 1_80 * np.pi
_UpperCamelCase = np.cos(_theta )
_UpperCamelCase = np.sin(_theta )
# get kernel x
_UpperCamelCase = cos_theta * px + sin_theta * py
# get kernel y
_UpperCamelCase = -sin_theta * px + cos_theta * py
# fill kernel
_UpperCamelCase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_a = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
_a = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_a = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_a = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_a = out / out.max() * 255
_a = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 194
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( _a , unittest.TestCase):
"""simple docstring"""
a__ : str = DiTPipeline
a__ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
a__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
a__ : Tuple = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
a__ : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_= TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=__lowerCamelCase , )
UpperCAmelCase_= AutoencoderKL()
UpperCAmelCase_= DDIMScheduler()
UpperCAmelCase_= {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def _SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Tuple=0 ) -> List[Any]:
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCAmelCase_= torch.manual_seed(__lowerCamelCase )
else:
UpperCAmelCase_= torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCAmelCase_= {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
UpperCAmelCase_= """cpu"""
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCAmelCase_= self.get_dummy_inputs(__lowerCamelCase )
UpperCAmelCase_= pipe(**__lowerCamelCase ).images
UpperCAmelCase_= image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
UpperCAmelCase_= np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
UpperCAmelCase_= np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
self._test_inference_batch_single_identical(relax_max_difference=__lowerCamelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase ( unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
UpperCAmelCase_= torch.manual_seed(0 )
UpperCAmelCase_= DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
UpperCAmelCase_= ["""vase""", """umbrella""", """white shark""", """white wolf"""]
UpperCAmelCase_= pipe.get_label_ids(__lowerCamelCase )
UpperCAmelCase_= pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(__lowerCamelCase , __lowerCamelCase ):
UpperCAmelCase_= load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_= DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
UpperCAmelCase_= DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
UpperCAmelCase_= ["""vase""", """umbrella"""]
UpperCAmelCase_= pipe.get_label_ids(__lowerCamelCase )
UpperCAmelCase_= torch.manual_seed(0 )
UpperCAmelCase_= pipe(__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(__lowerCamelCase , __lowerCamelCase ):
UpperCAmelCase_= load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 368
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = '''https://openaipublic.azureedge.net/jukebox/models/'''
__A = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def __a ( lowerCAmelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
UpperCAmelCase_= key.replace(""".model.1.bias""" ,""".conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
UpperCAmelCase_= key.replace(""".model.1.weight""" ,""".conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
UpperCAmelCase_= key.replace(""".model.3.bias""" ,""".conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
UpperCAmelCase_= key.replace(""".model.3.weight""" ,""".conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
UpperCAmelCase_= key.replace("""conditioner_blocks.0""" ,"""conditioner_blocks""" )
if "prime_prior" in key:
UpperCAmelCase_= key.replace("""prime_prior""" ,"""encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
UpperCAmelCase_= key.replace(""".emb.""" ,""".""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" ,""".codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" ,"""metadata_embedding.""" )
if "x_emb.emb." in key:
UpperCAmelCase_= key.replace("""0.x_emb.emb""" ,"""embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" ,"""encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" ,""".layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" ,"""_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" ,"""encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" ,"""encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" ,"""fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" ,"""embed_tokens""" )
return key
def __a ( lowerCAmelCase_ : int ,lowerCAmelCase_ : List[str] ,lowerCAmelCase_ : str ,lowerCAmelCase_ : Any ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_= {}
import re
UpperCAmelCase_= re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
UpperCAmelCase_= re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
UpperCAmelCase_= re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
UpperCAmelCase_= re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
UpperCAmelCase_= re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
UpperCAmelCase_= re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
UpperCAmelCase_= re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
UpperCAmelCase_= re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
UpperCAmelCase_= re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(lowerCAmelCase_ ):
UpperCAmelCase_= re_encoder_block_conv_in.match(lowerCAmelCase_ )
UpperCAmelCase_= regex_match.groups()
UpperCAmelCase_= int(groups[2] ) * 2 + int(groups[3] )
UpperCAmelCase_= F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
UpperCAmelCase_= re_encoder_block_conv_in.sub(lowerCAmelCase_ ,lowerCAmelCase_ )
elif re_encoder_block_resnet.fullmatch(lowerCAmelCase_ ):
UpperCAmelCase_= re_encoder_block_resnet.match(lowerCAmelCase_ )
UpperCAmelCase_= regex_match.groups()
UpperCAmelCase_= int(groups[2] ) * 2 + int(groups[3] )
UpperCAmelCase_= {"""1""": 1, """3""": 2}[groups[-2]]
UpperCAmelCase_= F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
UpperCAmelCase_= F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
UpperCAmelCase_= prefix + resnet_block
UpperCAmelCase_= re_encoder_block_resnet.sub(lowerCAmelCase_ ,lowerCAmelCase_ )
elif re_encoder_block_proj_out.fullmatch(lowerCAmelCase_ ):
UpperCAmelCase_= re_encoder_block_proj_out.match(lowerCAmelCase_ )
UpperCAmelCase_= regex_match.groups()
UpperCAmelCase_= F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
UpperCAmelCase_= re_encoder_block_proj_out.sub(lowerCAmelCase_ ,lowerCAmelCase_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(lowerCAmelCase_ ):
UpperCAmelCase_= re_decoder_block_conv_out.match(lowerCAmelCase_ )
UpperCAmelCase_= regex_match.groups()
UpperCAmelCase_= int(groups[2] ) * 2 + int(groups[3] ) - 2
UpperCAmelCase_= F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
UpperCAmelCase_= re_decoder_block_conv_out.sub(lowerCAmelCase_ ,lowerCAmelCase_ )
elif re_decoder_block_resnet.fullmatch(lowerCAmelCase_ ):
UpperCAmelCase_= re_decoder_block_resnet.match(lowerCAmelCase_ )
UpperCAmelCase_= regex_match.groups()
UpperCAmelCase_= int(groups[2] ) * 2 + int(groups[3] ) - 2
UpperCAmelCase_= {"""1""": 1, """3""": 2}[groups[-2]]
UpperCAmelCase_= F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
UpperCAmelCase_= F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
UpperCAmelCase_= prefix + resnet_block
UpperCAmelCase_= re_decoder_block_resnet.sub(lowerCAmelCase_ ,lowerCAmelCase_ )
elif re_decoder_block_proj_in.fullmatch(lowerCAmelCase_ ):
UpperCAmelCase_= re_decoder_block_proj_in.match(lowerCAmelCase_ )
UpperCAmelCase_= regex_match.groups()
UpperCAmelCase_= F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
UpperCAmelCase_= re_decoder_block_proj_in.sub(lowerCAmelCase_ ,lowerCAmelCase_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(lowerCAmelCase_ ):
UpperCAmelCase_= re_prior_cond_conv_out.match(lowerCAmelCase_ )
UpperCAmelCase_= regex_match.groups()
UpperCAmelCase_= int(groups[1] ) * 2 + int(groups[2] ) - 2
UpperCAmelCase_= F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
UpperCAmelCase_= re_prior_cond_conv_out.sub(lowerCAmelCase_ ,lowerCAmelCase_ )
elif re_prior_cond_resnet.fullmatch(lowerCAmelCase_ ):
UpperCAmelCase_= re_prior_cond_resnet.match(lowerCAmelCase_ )
UpperCAmelCase_= regex_match.groups()
UpperCAmelCase_= int(groups[1] ) * 2 + int(groups[2] ) - 2
UpperCAmelCase_= {"""1""": 1, """3""": 2}[groups[-2]]
UpperCAmelCase_= F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
UpperCAmelCase_= F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
UpperCAmelCase_= prefix + resnet_block
UpperCAmelCase_= re_prior_cond_resnet.sub(lowerCAmelCase_ ,lowerCAmelCase_ )
elif re_prior_cond_proj_in.fullmatch(lowerCAmelCase_ ):
UpperCAmelCase_= re_prior_cond_proj_in.match(lowerCAmelCase_ )
UpperCAmelCase_= regex_match.groups()
UpperCAmelCase_= F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
UpperCAmelCase_= re_prior_cond_proj_in.sub(lowerCAmelCase_ ,lowerCAmelCase_ )
# keep original key
else:
UpperCAmelCase_= original_key
UpperCAmelCase_= replace_key(lowerCAmelCase_ )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
UpperCAmelCase_= model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
UpperCAmelCase_= original_key
UpperCAmelCase_= original_key
UpperCAmelCase_= value
return new_dict
@torch.no_grad()
def __a ( lowerCAmelCase_ : List[Any]=None ,lowerCAmelCase_ : str=None ) -> List[str]:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
UpperCAmelCase_= requests.get(F"""{PREFIX}{file}""" ,allow_redirects=lowerCAmelCase_ )
os.makedirs(F"""{pytorch_dump_folder_path}/""" ,exist_ok=lowerCAmelCase_ )
open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ,"""wb""" ).write(r.content )
UpperCAmelCase_= MODEL_MAPPING[model_name.split("""/""" )[-1]]
UpperCAmelCase_= JukeboxConfig.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase_= JukeboxModel(lowerCAmelCase_ )
UpperCAmelCase_= []
UpperCAmelCase_= {}
for i, dict_name in enumerate(lowerCAmelCase_ ):
UpperCAmelCase_= torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["""model"""]
UpperCAmelCase_= {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
UpperCAmelCase_= old_dic[k]
elif k.endswith(""".w""" ):
UpperCAmelCase_= old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
UpperCAmelCase_= old_dic[k]
else:
UpperCAmelCase_= old_dic[k]
UpperCAmelCase_= """vqvae""" if i == 0 else F"""priors.{3 - i}"""
UpperCAmelCase_= fix_jukebox_keys(lowerCAmelCase_ ,model.state_dict() ,lowerCAmelCase_ ,lowerCAmelCase_ )
weight_dict.append(lowerCAmelCase_ )
UpperCAmelCase_= weight_dict.pop(0 )
model.vqvae.load_state_dict(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
with open(F"""{pytorch_dump_folder_path}/mapping.json""" ,"""w""" ) as txtfile:
json.dump(lowerCAmelCase_ ,lowerCAmelCase_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
return weight_dict
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
__A = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 277
| 0
|
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowercase : List[str] = imread(R'digital_image_processing/image_data/lena_small.jpg')
lowercase : str = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : List[str] = cn.convert_to_negative(a_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase_ ( ):
'''simple docstring'''
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 110 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Optional[int] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : int = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A : Optional[Any] = canny.canny(a_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert gg.gaussian_filter(a_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Optional[int] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A : int = conv.img_convolve(a_ , a_ ).astype(a_ )
assert res.any()
def lowerCAmelCase_ ( ):
'''simple docstring'''
assert med.median_filter(a_ , 3 ).any()
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : List[str] = sob.sobel_filter(a_ )
assert grad.any() and theta.any()
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Optional[Any] = sp.make_sepia(a_ , 20 )
assert sepia.all()
def lowerCAmelCase_ ( snake_case__ = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
A : Union[str, Any] = bs.Burkes(imread(a_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase_ ( snake_case__ = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
A : str = rs.NearestNeighbour(imread(a_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Optional[Any] = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A : Dict = imread(a_ , 0 )
# Test for get_neighbors_pixel function() return not None
A : Union[str, Any] = 0
A : List[str] = 0
A : Optional[int] = image[x_coordinate][y_coordinate]
A : Union[str, Any] = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A : List[Any] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A : int = lbp.local_binary_value(a_ , a_ , a_ )
assert lbp_image.any()
| 3
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = (UnCLIPScheduler,)
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]:
A_ : Union[str, Any] = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : List[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" )
A_ : Dict = scheduler_class(**_lowerCamelCase )
A_ : Dict = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 < 1e-5
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Any = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[Any] = pred_prev_sample
A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(25 )
A_ : List[str] = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
if i + 1 == timesteps.shape[0]:
A_ : List[str] = None
else:
A_ : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Optional[Any] = pred_prev_sample
A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
pass
| 344
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5
|
'''simple docstring'''
import sys
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : List[str] = len(__a )
_a : Dict = [[0 for x in range(__a )] for x in range(__a )]
_a : Union[str, Any] = [[0 for x in range(__a )] for x in range(__a )]
for chain_length in range(2 , __a ):
for a in range(1 , n - chain_length + 1 ):
_a : Tuple = a + chain_length - 1
_a : Any = sys.maxsize
for c in range(__a , __a ):
_a : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_a : Dict = cost
_a : Any = c
return matrix, sol
def UpperCAmelCase_ (__a : Tuple , __a : List[str] , __a : Dict ):
"""simple docstring"""
if i == j:
print('A' + str(__a ) , end=' ' )
else:
print('(' , end=' ' )
print_optiomal_solution(__a , __a , optimal_solution[i][j] )
print_optiomal_solution(__a , optimal_solution[i][j] + 1 , __a )
print(')' , end=' ' )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
_a : Any = len(__a )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_a, _a : Union[str, Any] = matrix_chain_order(__a )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
print_optiomal_solution(__a , 1 , n - 1 )
if __name__ == "__main__":
main()
| 5
| 1
|
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Dict , ) -> List[str]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = value_function
SCREAMING_SNAKE_CASE : List[str] = unet
SCREAMING_SNAKE_CASE : Dict = scheduler
SCREAMING_SNAKE_CASE : Any = env
SCREAMING_SNAKE_CASE : Dict = env.get_dataset()
SCREAMING_SNAKE_CASE : List[str] = {}
for key in self.data.keys():
try:
SCREAMING_SNAKE_CASE : Dict = self.data[key].mean()
except: # noqa: E722
pass
SCREAMING_SNAKE_CASE : Dict = {}
for key in self.data.keys():
try:
SCREAMING_SNAKE_CASE : Optional[Any] = self.data[key].std()
except: # noqa: E722
pass
SCREAMING_SNAKE_CASE : List[str] = env.observation_space.shape[0]
SCREAMING_SNAKE_CASE : List[Any] = env.action_space.shape[0]
def __UpperCamelCase ( self : Dict , a : Any , a : Optional[int] ) -> int:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def __UpperCamelCase ( self : Any , a : List[str] , a : Optional[int] ) -> List[str]:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def __UpperCamelCase ( self : Union[str, Any] , a : int ) -> List[str]:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def __UpperCamelCase ( self : List[Any] , a : Optional[Any] , a : List[Any] , a : List[str] ) -> Dict:
"""simple docstring"""
for key, val in cond.items():
SCREAMING_SNAKE_CASE : List[Any] = val.clone()
return x_in
def __UpperCamelCase ( self : Optional[int] , a : str , a : List[str] , a : Any , a : int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
SCREAMING_SNAKE_CASE : Tuple = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
SCREAMING_SNAKE_CASE : List[str] = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
SCREAMING_SNAKE_CASE : List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
SCREAMING_SNAKE_CASE : Dict = self.scheduler._get_variance(a )
SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * posterior_variance )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_std * grad
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : List[str] = x.detach()
SCREAMING_SNAKE_CASE : Any = x + scale * grad
SCREAMING_SNAKE_CASE : Dict = self.reset_xa(a , a , self.action_dim )
SCREAMING_SNAKE_CASE : Optional[Any] = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.step(a , a , a , predict_epsilon=a )["prev_sample"]
# apply conditions to the trajectory (set the initial state)
SCREAMING_SNAKE_CASE : Dict = self.reset_xa(a , a , self.action_dim )
SCREAMING_SNAKE_CASE : Tuple = self.to_torch(a )
return x, y
def __call__( self : Dict , a : int , a : int=64 , a : Union[str, Any]=32 , a : str=2 , a : Tuple=0.1 ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.normalize(a , "observations" )
SCREAMING_SNAKE_CASE : List[Any] = obs[None].repeat(a , axis=0 )
SCREAMING_SNAKE_CASE : str = {0: self.to_torch(a )}
SCREAMING_SNAKE_CASE : Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
SCREAMING_SNAKE_CASE : str = randn_tensor(a , device=self.unet.device )
SCREAMING_SNAKE_CASE : List[Any] = self.reset_xa(a , a , self.action_dim )
SCREAMING_SNAKE_CASE : Dict = self.to_torch(a )
# run the diffusion process
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
SCREAMING_SNAKE_CASE : int = y.argsort(0 , descending=a ).squeeze()
SCREAMING_SNAKE_CASE : Union[str, Any] = x[sorted_idx]
SCREAMING_SNAKE_CASE : List[Any] = sorted_values[:, :, : self.action_dim]
SCREAMING_SNAKE_CASE : Dict = actions.detach().cpu().numpy()
SCREAMING_SNAKE_CASE : Tuple = self.de_normalize(a , key="actions" )
# select the action with the highest value
if y is not None:
SCREAMING_SNAKE_CASE : Tuple = 0
else:
# if we didn't run value guiding, select a random action
SCREAMING_SNAKE_CASE : Optional[int] = np.random.randint(0 , a )
SCREAMING_SNAKE_CASE : Optional[Any] = denorm_actions[selected_index, 0]
return denorm_actions
| 76
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A__ ( UpperCamelCase ):
A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def A__ ( UpperCamelCase ):
A = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
A = s_dict.pop(UpperCamelCase )
elif "subsample" in key:
A = s_dict.pop(UpperCamelCase )
def A__ ( UpperCamelCase ):
A, A = emb.weight.shape
A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A = emb.weight.data
return lin_layer
def A__ ( UpperCamelCase , UpperCamelCase ):
A = torch.load(UpperCamelCase , map_location="cpu" )
A = mam_aaa["args"]
A = mam_aaa["model"]
A = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(UpperCamelCase )
rename_keys(UpperCamelCase )
A = state_dict["decoder.embed_tokens.weight"].shape[0]
A = args.share_decoder_input_output_embed
A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )]
A = SpeechaTextConfig(
vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , )
A = SpeechaTextForConditionalGeneration(UpperCamelCase )
A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
A = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A = lm_head_weights
model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case : str = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 292
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _lowerCAmelCase ( __snake_case : str ) -> List[str]:
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _lowerCAmelCase ( ) -> Optional[int]:
__A : Dict = ArgumentParser(
'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=__snake_case )
__A : str = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__snake_case )
EnvironmentCommand.register_subcommand(__snake_case )
TestCommand.register_subcommand(__snake_case )
RunBeamCommand.register_subcommand(__snake_case )
DummyDataCommand.register_subcommand(__snake_case )
# Parse args
__A ,__A : List[Any] = parser.parse_known_args()
if not hasattr(__snake_case , 'func' ):
parser.print_help()
exit(1 )
__A : int = parse_unknown_args(__snake_case )
# Run
__A : Any = args.func(__snake_case , **__snake_case )
service.run()
if __name__ == "__main__":
main()
| 190
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = 0
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(_UpperCAmelCase) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(_UpperCAmelCase) , 0)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 20)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = AutoConfig.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
# Check that tokenizer_type ≠ model_type
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCAmelCase , 'vocab.txt'))
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='bert' , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCAmelCase , 'merges.txt'))
__A : str = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='gpt2' , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCAmelCase , 'vocab.txt'))
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='bert')
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCAmelCase , 'merges.txt'))
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='gpt2')
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with pytest.raises(_UpperCAmelCase):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx')
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__A : List[Any] = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _UpperCAmelCase)
else:
self.assertEqual(tokenizer.do_lower_case , _UpperCAmelCase)
self.assertEqual(tokenizer.model_max_length , 512)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_UpperCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__A : str = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = TOKENIZER_MAPPING.values()
__A : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_UpperCAmelCase) , _UpperCAmelCase)
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased') , _UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_UpperCAmelCase)
__A : str = 'Hello, world. How are you?'
__A : List[str] = tokenizer.tokenize(_UpperCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
__A : Dict = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_UpperCAmelCase)
__A : List[Any] = tokenizer.tokenize(_UpperCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
self.assertEqual(type(_UpperCAmelCase) , _UpperCAmelCase)
self.assertEqual(tokenizer.model_max_length , 512)
self.assertEqual(tokenizer.vocab_size , 3_0000)
self.assertEqual(tokenizer.unk_token , '[UNK]')
self.assertEqual(tokenizer.padding_side , 'right')
self.assertEqual(tokenizer.truncation_side , 'right')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = AutoTokenizer.from_pretrained('ctrl')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = get_tokenizer_config('bert-base-cased')
__A : Optional[int] = config.pop('_commit_hash' , _UpperCAmelCase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_UpperCAmelCase , {'do_lower_case': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__A : Dict = get_tokenizer_config(_UpperCAmelCase)
self.assertDictEqual(_UpperCAmelCase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Any = get_tokenizer_config(_UpperCAmelCase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase):
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
__A : Optional[Any] = CustomTokenizer.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : int = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
AutoConfig.register('custom' , _UpperCAmelCase)
# Can register in two steps
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase):
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Optional[int] = BertTokenizerFast.from_pretrained(_UpperCAmelCase)
bert_tokenizer.save_pretrained(_UpperCAmelCase)
__A : Dict = CustomTokenizerFast.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaises(_UpperCAmelCase):
__A : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase):
__A : Dict = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
__A : str = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Dict = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__A : Union[str, Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = False
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
# If remote code is not set, the default is to use local
__A : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__A : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__A : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__A : Any = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertTrue(tokenizer.special_attribute_present)
__A : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier'):
__A : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , revision='aaaaaa')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__A : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 190
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=17 , _SCREAMING_SNAKE_CASE=23 , _SCREAMING_SNAKE_CASE=11 , _SCREAMING_SNAKE_CASE=True , ) -> Dict:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = act_dim
UpperCamelCase = state_dim
UpperCamelCase = hidden_size
UpperCamelCase = max_length
UpperCamelCase = is_training
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCamelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
UpperCamelCase = random_attention_mask((self.batch_size, self.seq_length) )
UpperCamelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Any:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = DecisionTransformerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase = model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) ,(
UpperCamelCase
) ,(
UpperCamelCase
) ,(
UpperCamelCase
) ,(
UpperCamelCase
) ,(
UpperCamelCase
) ,(
UpperCamelCase
) ,
) = config_and_inputs
UpperCamelCase = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class a_ ( __a , __a , __a , unittest.TestCase ):
lowercase = (DecisionTransformerModel,) if is_torch_available() else ()
lowercase = ()
lowercase = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowercase = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = DecisionTransformerModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def A__ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
@slow
def A__ ( self ) -> List[str]:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = DecisionTransformerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase ,UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCAmelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )
@require_torch
class a_ ( unittest.TestCase ):
@slow
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = 2 # number of steps of autoregressive prediction we will perform
UpperCamelCase = 10 # defined by the RL environment, may be normalized
UpperCamelCase = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
UpperCamelCase = model.to(lowerCAmelCase__ )
UpperCamelCase = model.config
torch.manual_seed(0 )
UpperCamelCase = torch.randn(1 , 1 , config.state_dim ).to(device=lowerCAmelCase__ , dtype=torch.floataa ) # env.reset()
UpperCamelCase = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=lowerCAmelCase__ )
UpperCamelCase = torch.tensor(lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCamelCase = state
UpperCamelCase = torch.zeros(1 , 0 , config.act_dim , device=lowerCAmelCase__ , dtype=torch.floataa )
UpperCamelCase = torch.zeros(1 , 0 , device=lowerCAmelCase__ , dtype=torch.floataa )
UpperCamelCase = torch.tensor(0 , device=lowerCAmelCase__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(lowerCAmelCase__ ):
UpperCamelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCAmelCase__ )] , dim=1 )
UpperCamelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCAmelCase__ )] , dim=1 )
UpperCamelCase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = model(
states=lowerCAmelCase__ , actions=lowerCAmelCase__ , rewards=lowerCAmelCase__ , returns_to_go=lowerCAmelCase__ , timesteps=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=lowerCAmelCase__ , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCamelCase = action_pred[0, -1]
UpperCamelCase = torch.cat([states, state] , dim=1 )
UpperCamelCase = returns_to_go[0, -1] - reward
UpperCamelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCamelCase = torch.cat(
[timesteps, torch.ones((1, 1) , device=lowerCAmelCase__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 321
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Tuple = ['''keras_nlp''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""keras_nlp"""])
| 100
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase_ ( _lowercase):
snake_case__ = ['''pixel_values''']
def __init__( self : Union[str, Any] , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 255 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : bool = True , **__UpperCamelCase : Any , ) -> None:
super().__init__(**__UpperCamelCase )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 224}
_UpperCamelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCamelCase = do_convert_rgb
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , ) -> np.ndarray:
_UpperCamelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_UpperCamelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Any , ) -> np.ndarray:
_UpperCamelCase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[int, float] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Optional[Any] , ) -> Union[str, Any]:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Any , ) -> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : int = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : Optional[Any] , ) -> PIL.Image.Image:
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(__UpperCamelCase , param_name='''size''' , default_to_square=__UpperCamelCase )
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(__UpperCamelCase , param_name='''crop_size''' , default_to_square=__UpperCamelCase )
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCamelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCamelCase = [convert_to_rgb(__UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 54
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
snake_case__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] ) -> int:
_UpperCamelCase = TextaTextGenerationPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase )
return generator, ["Something to write", "Something else"]
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) -> Union[str, Any]:
_UpperCamelCase = generator('''Something there''' )
self.assertEqual(__UpperCamelCase , [{'''generated_text''': ANY(__UpperCamelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
_UpperCamelCase = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
] , )
_UpperCamelCase = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
[{'''generated_text''': ANY(__UpperCamelCase )}, {'''generated_text''': ANY(__UpperCamelCase )}],
] , )
with self.assertRaises(__UpperCamelCase ):
generator(4 )
@require_torch
def _UpperCamelCase ( self : List[str] ) -> List[str]:
_UpperCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
_UpperCamelCase = generator('''Something there''' , do_sample=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [{'''generated_text''': ''''''}] )
_UpperCamelCase = 3
_UpperCamelCase = generator(
'''Something there''' , num_return_sequences=__UpperCamelCase , num_beams=__UpperCamelCase , )
_UpperCamelCase = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = generator('''This is a test''' , do_sample=__UpperCamelCase , num_return_sequences=2 , return_tensors=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
_UpperCamelCase = generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = generator(
['''This is a test''', '''This is a second test'''] , do_sample=__UpperCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCamelCase , )
self.assertEqual(
__UpperCamelCase , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
_UpperCamelCase = generator('''Something there''' , do_sample=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [{'''generated_text''': ''''''}] )
| 54
| 1
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 206
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ):
A_ : str = parent
A_ : int = batch_size
A_ : Dict = seq_length
A_ : Any = is_training
A_ : List[str] = use_input_mask
A_ : Any = use_token_type_ids
A_ : int = use_labels
A_ : str = vocab_size
A_ : Any = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Dict = intermediate_size
A_ : Optional[int] = hidden_act
A_ : int = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : List[Any] = type_vocab_size
A_ : Any = type_sequence_label_size
A_ : Tuple = initializer_range
A_ : int = num_labels
A_ : Optional[int] = num_choices
A_ : Optional[int] = scope
def _a (self ):
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : List[str] = None
if self.use_input_mask:
A_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
A_ : int = None
if self.use_token_type_ids:
A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Tuple = None
A_ : List[str] = None
A_ : List[str] = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : str = ids_tensor([self.batch_size] , self.num_choices )
A_ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a (self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : List[Any] = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : List[Any] = model(lowercase , attention_mask=lowercase )
A_ : str = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ):
A_ : List[Any] = BioGptForCausalLM(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ):
A_ : Optional[Any] = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
# create attention mask
A_ : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
A_ : Optional[Any] = self.seq_length // 2
A_ : List[Any] = 0
# first forward pass
A_, A_ : List[str] = model(lowercase , attention_mask=lowercase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A_ : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A_ : Union[str, Any] = ids_tensor((1,) , lowercase ).item() + 1
A_ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A_ : Optional[int] = random_other_next_tokens
# append to next input_ids and attn_mask
A_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : Any = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase )] , dim=1 , )
# get two different outputs
A_ : List[Any] = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""]
A_ : Optional[int] = model(lowercase , past_key_values=lowercase , attention_mask=lowercase )["""last_hidden_state"""]
# select random slice
A_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : int = output_from_no_past[:, -1, random_slice_idx].detach()
A_ : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ):
A_ : Optional[int] = BioGptModel(config=lowercase ).to(lowercase ).eval()
A_ : Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
# first forward pass
A_ : int = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
A_, A_ : int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A_ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ : str = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A_ : List[str] = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""]
A_ : Tuple = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[
"""last_hidden_state"""
]
# select random slice
A_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ):
A_ : Union[str, Any] = BioGptForCausalLM(lowercase )
model.to(lowercase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A_ : Dict = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _a (self , lowercase , *lowercase ):
A_ : Union[str, Any] = BioGptModel(lowercase )
A_ : str = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ):
A_ : Union[str, Any] = self.num_labels
A_ : Optional[int] = BioGptForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a (self ):
A_ : List[Any] = self.prepare_config_and_inputs()
(
(
A_
), (
A_
), (
A_
), (
A_
), (
A_
), (
A_
), (
A_
),
) : Dict = config_and_inputs
A_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : int = (BioGptForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : List[str] = False
def _a (self ):
A_ : Tuple = BioGptModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : int = type
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase )
def _a (self ):
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase , gradient_checkpointing=lowercase )
def _a (self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase )
def _a (self ):
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase )
def _a (self ):
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase )
@slow
def _a (self ):
A_ : str = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(lowercase )
A_ : List[str] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A_ : List[str] = """left"""
# Define PAD Token = EOS Token = 50256
A_ : Any = tokenizer.eos_token
A_ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A_ : List[Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A_ : List[str] = tokenizer(lowercase , return_tensors="""pt""" , padding=lowercase )
A_ : List[str] = inputs["""input_ids"""].to(lowercase )
A_ : List[Any] = model.generate(
input_ids=lowercase , attention_mask=inputs["""attention_mask"""].to(lowercase ) , )
A_ : List[str] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(lowercase )
A_ : List[Any] = model.generate(input_ids=lowercase )
A_ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A_ : Union[str, Any] = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(lowercase )
A_ : Any = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
A_ : List[str] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
A_ : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
A_ : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
A_ : Union[str, Any] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
@slow
def _a (self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = BioGptModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def _a (self ):
A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = 3
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(lowercase )
A_ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A_ : Union[str, Any] = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Tuple = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a (self ):
A_, A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = 3
A_ : Dict = """multi_label_classification"""
A_ : List[Any] = input_dict["""input_ids"""]
A_ : Tuple = input_ids.ne(1 ).to(lowercase )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Dict = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : int = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
A_ : Dict = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A_ : Optional[Any] = torch.tensor([[2, 4805, 9, 656, 21]] )
A_ : Dict = model(lowercase )[0]
A_ : Any = 42384
A_ : Union[str, Any] = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowercase )
A_ : Union[str, Any] = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
@slow
def _a (self ):
A_ : Union[str, Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A_ : Dict = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(lowercase )
torch.manual_seed(0 )
A_ : Dict = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(lowercase )
A_ : Optional[int] = model.generate(
**lowercase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=lowercase , )
A_ : Union[str, Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase )
A_ : Any = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(lowercase , lowercase )
| 206
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = StableDiffusionPanoramaPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
_lowerCAmelCase : str = DDIMScheduler()
torch.manual_seed(0)
_lowerCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
torch.manual_seed(0)
_lowerCAmelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
_lowerCAmelCase : List[str] = CLIPTextModel(__a)
_lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
_lowerCAmelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case__ ( self, __a, __a=0):
'''simple docstring'''
_lowerCAmelCase : int = torch.manual_seed(__a)
_lowerCAmelCase : Dict = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : Any = StableDiffusionPanoramaPipeline(**__a)
_lowerCAmelCase : str = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(__a)
_lowerCAmelCase : Dict = sd_pipe(**__a).images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Any = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2])
def snake_case__ ( self):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25E-3)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase : Union[str, Any] = StableDiffusionPanoramaPipeline(**__a)
_lowerCAmelCase : Optional[Any] = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(__a)
_lowerCAmelCase : Any = "french fries"
_lowerCAmelCase : Union[str, Any] = sd_pipe(**__a, negative_prompt=__a)
_lowerCAmelCase : Optional[int] = output.images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**__a)
_lowerCAmelCase : Union[str, Any] = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(__a)
_lowerCAmelCase : Any = sd_pipe(**__a, view_batch_size=2)
_lowerCAmelCase : Tuple = output.images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Union[str, Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Optional[int] = self.get_dummy_components()
_lowerCAmelCase : Any = EulerAncestralDiscreteScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear")
_lowerCAmelCase : Optional[int] = StableDiffusionPanoramaPipeline(**__a)
_lowerCAmelCase : Any = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : str = self.get_dummy_inputs(__a)
_lowerCAmelCase : Union[str, Any] = sd_pipe(**__a).images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Optional[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : int = PNDMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=__a)
_lowerCAmelCase : Any = StableDiffusionPanoramaPipeline(**__a)
_lowerCAmelCase : Dict = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : int = self.get_dummy_inputs(__a)
_lowerCAmelCase : str = sd_pipe(**__a).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Optional[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self, __a=0):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(__a)
_lowerCAmelCase : Union[str, Any] = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "stabilityai/stable-diffusion-2-base"
_lowerCAmelCase : List[Any] = DDIMScheduler.from_pretrained(__a, subfolder="scheduler")
_lowerCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(__a, scheduler=__a, safety_checker=__a)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing()
_lowerCAmelCase : Dict = self.get_inputs()
_lowerCAmelCase : Union[str, Any] = pipe(**__a).images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_lowerCAmelCase : int = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
])
assert np.abs(expected_slice - image_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base", safety_checker=__a)
_lowerCAmelCase : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing()
_lowerCAmelCase : Union[str, Any] = self.get_inputs()
_lowerCAmelCase : Optional[int] = pipe(**__a).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_lowerCAmelCase : Tuple = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = 0
def callback_fn(__a, __a, __a) -> None:
_lowerCAmelCase : Tuple = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowerCAmelCase : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_lowerCAmelCase : Dict = latents[0, -3:, -3:, -1]
_lowerCAmelCase : str = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
elif step == 2:
_lowerCAmelCase : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_lowerCAmelCase : Any = latents[0, -3:, -3:, -1]
_lowerCAmelCase : Dict = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
_lowerCAmelCase : str = False
_lowerCAmelCase : List[Any] = "stabilityai/stable-diffusion-2-base"
_lowerCAmelCase : Any = DDIMScheduler.from_pretrained(__a, subfolder="scheduler")
_lowerCAmelCase : Union[str, Any] = StableDiffusionPanoramaPipeline.from_pretrained(__a, scheduler=__a, safety_checker=__a)
_lowerCAmelCase : Dict = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing()
_lowerCAmelCase : List[str] = self.get_inputs()
pipe(**__a, callback=__a, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case__ ( self):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase : str = "stabilityai/stable-diffusion-2-base"
_lowerCAmelCase : Optional[Any] = DDIMScheduler.from_pretrained(__a, subfolder="scheduler")
_lowerCAmelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(__a, scheduler=__a, safety_checker=__a)
_lowerCAmelCase : Optional[int] = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase : int = self.get_inputs()
_lowerCAmelCase : int = pipe(**__a)
_lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 300
|
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a):
'''simple docstring'''
if vertex not in self.adjacency:
_lowerCAmelCase : List[Any] = {}
self.num_vertices += 1
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
self.add_vertex(__a)
self.add_vertex(__a)
if head == tail:
return
_lowerCAmelCase : Dict = weight
_lowerCAmelCase : Dict = weight
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
edges.remove((tail, head, weight))
for i in range(len(__a)):
_lowerCAmelCase : Optional[int] = list(edges[i])
edges.sort(key=lambda __a: e[2])
for i in range(len(__a) - 1):
if edges[i][2] >= edges[i + 1][2]:
_lowerCAmelCase : Tuple = edges[i][2] + 1
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = edge
_lowerCAmelCase : Union[str, Any] = weight
_lowerCAmelCase : Optional[int] = weight
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCAmelCase : List[Any] = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def snake_case__ ( self):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def snake_case__ ( __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Graph()
if vertices is None:
_lowerCAmelCase : Any = []
if edges is None:
_lowerCAmelCase : Any = []
for vertex in vertices:
g.add_vertex(__a)
for edge in edges:
g.add_edge(*__a)
return g
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = {}
def __len__( self):
'''simple docstring'''
return len(self.parent)
def snake_case__ ( self, __a):
'''simple docstring'''
if item in self.parent:
return self.find(__a)
_lowerCAmelCase : Optional[int] = item
_lowerCAmelCase : Any = 0
return item
def snake_case__ ( self, __a):
'''simple docstring'''
if item not in self.parent:
return self.make_set(__a)
if item != self.parent[item]:
_lowerCAmelCase : Any = self.find(self.parent[item])
return self.parent[item]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.find(__a)
_lowerCAmelCase : List[str] = self.find(__a)
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCAmelCase : List[Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCAmelCase : int = roota
return roota
return None
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = graph.num_vertices
_lowerCAmelCase : Optional[int] = Graph.UnionFind()
_lowerCAmelCase : str = []
while num_components > 1:
_lowerCAmelCase : List[str] = {}
for vertex in graph.get_vertices():
_lowerCAmelCase : Optional[Any] = -1
_lowerCAmelCase : Union[str, Any] = graph.get_edges()
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = edge
edges.remove((tail, head, weight))
for edge in edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = edge
_lowerCAmelCase : Dict = union_find.find(__a)
_lowerCAmelCase : Optional[Any] = union_find.find(__a)
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Union[str, Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCAmelCase : Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = cheap_edge[vertex]
if union_find.find(__a) != union_find.find(__a):
union_find.union(__a, __a)
mst_edges.append(cheap_edge[vertex])
_lowerCAmelCase : Any = num_components - 1
_lowerCAmelCase : List[str] = Graph.build(edges=__a)
return mst
| 300
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Dict = LEDTokenizer
lowerCAmelCase : List[str] = LEDTokenizerFast
lowerCAmelCase : List[Any] = True
def __lowercase ( self : List[str] ):
super().setUp()
_a : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_a : Tuple = dict(zip(_UpperCAmelCase ,range(len(_UpperCAmelCase ) ) ) )
_a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_a : Tuple = {'unk_token': '<unk>'}
_a : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def __lowercase ( self : List[Any] ,**_UpperCAmelCase : List[Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Dict ,**_UpperCAmelCase : str ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Any ,_UpperCAmelCase : Any ):
return "lower newer", "lower newer"
@cached_property
def __lowercase ( self : str ):
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def __lowercase ( self : Optional[Any] ):
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def __lowercase ( self : List[str] ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Optional[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : List[Any] = tokenizer(_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_a : int = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@require_torch
def __lowercase ( self : Union[str, Any] ):
_a : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : int = tokenizer(_UpperCAmelCase ,padding=_UpperCAmelCase ,return_tensors='pt' )
self.assertIn('input_ids' ,_UpperCAmelCase )
self.assertIn('attention_mask' ,_UpperCAmelCase )
self.assertNotIn('labels' ,_UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' ,_UpperCAmelCase )
@require_torch
def __lowercase ( self : str ):
_a : List[Any] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Dict = tokenizer(text_target=_UpperCAmelCase ,max_length=32 ,padding='max_length' ,return_tensors='pt' )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
@require_torch
def __lowercase ( self : int ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Any = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual(batch.input_ids.shape ,(2, 5122) )
@require_torch
def __lowercase ( self : List[str] ):
_a : int = ['A long paragraph for summarization.']
_a : int = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Dict = tokenizer(_UpperCAmelCase ,return_tensors='pt' )
_a : List[str] = tokenizer(text_target=_UpperCAmelCase ,return_tensors='pt' )
_a : List[Any] = inputs['input_ids']
_a : Tuple = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowercase ( self : Union[str, Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Optional[Any] = ['Summary of the text.', 'Another summary.']
_a : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a : List[Any] = tokenizer(_UpperCAmelCase ,padding=_UpperCAmelCase )
_a : int = [[0] * len(_UpperCAmelCase ) for x in encoded_output['input_ids']]
_a : List[Any] = tokenizer.pad(_UpperCAmelCase )
self.assertSequenceEqual(outputs['global_attention_mask'] ,_UpperCAmelCase )
def __lowercase ( self : List[str] ):
pass
def __lowercase ( self : int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Dict = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Optional[Any] = 'A, <mask> AllenNLP sentence.'
_a : int = tokenizer_r.encode_plus(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase )
_a : List[Any] = tokenizer_p.encode_plus(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase )
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
_a : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_a : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_UpperCAmelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_UpperCAmelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 89
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 230
| 0
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 370
|
from __future__ import annotations
import math
def lowerCAmelCase_ ( _lowercase : int) -> list[int]:
"""simple docstring"""
if num <= 0:
a__ : Tuple = F'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(_lowercase)
a__ : List[Any] = [True] * (num + 1)
a__ : List[str] = []
a__ : List[Any] = 2
a__ : Optional[int] = int(math.sqrt(_lowercase))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(_lowercase)
# Set multiples of start be False
for i in range(start * start , num + 1 , _lowercase):
if sieve[i] is True:
a__ : Optional[int] = False
start += 1
for j in range(end + 1 , num + 1):
if sieve[j] is True:
prime.append(_lowercase)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 266
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase ):
if len(__lowercase ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
__lowerCAmelCase = list(__lowercase )
__lowerCAmelCase = degree
def __add__(self , __lowercase ):
if self.degree > polynomial_a.degree:
__lowerCAmelCase = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , __lowercase )
else:
__lowerCAmelCase = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , __lowercase )
def __sub__(self , __lowercase ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__(self ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__(self , __lowercase ):
__lowerCAmelCase = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , __lowercase )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__(self ):
__lowerCAmelCase = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__lowercase )
return polynomial
def __repr__(self ):
return self.__str__()
def _snake_case (self ):
__lowerCAmelCase = [0] * self.degree
for i in range(self.degree ):
__lowerCAmelCase = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , __lowercase )
def _snake_case (self , __lowercase = 0 ):
__lowerCAmelCase = [0] * (self.degree + 2)
__lowerCAmelCase = constant
for i in range(self.degree + 1 ):
__lowerCAmelCase = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , __lowercase )
def __eq__(self , __lowercase ):
if not isinstance(__lowercase , __lowercase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__(self , __lowercase ):
return not self.__eq__(__lowercase )
| 174
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 174
| 1
|
"""simple docstring"""
from __future__ import annotations
lowercase_ = 10
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = 1
lowercase__ = max(SCREAMING_SNAKE_CASE_ )
while placement <= max_digit:
# declare and initialize empty buckets
lowercase__ = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowercase__ = int((i / placement) % RADIX )
buckets[tmp].append(SCREAMING_SNAKE_CASE_ )
# put each buckets' contents into list_of_ints
lowercase__ = 0
for b in range(SCREAMING_SNAKE_CASE_ ):
for i in buckets[b]:
lowercase__ = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
|
from pathlib import Path
import fire
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = Path(SCREAMING_SNAKE_CASE_ )
lowercase__ = Path(SCREAMING_SNAKE_CASE_ )
dest_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
for path in src_dir.iterdir():
lowercase__ = [x.rstrip() for x in list(path.open().readlines() )][:n]
lowercase__ = dest_dir.joinpath(path.name )
print(SCREAMING_SNAKE_CASE_ )
dest_path.open("w" ).write("\n".join(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
fire.Fire(minify)
| 224
| 0
|
'''simple docstring'''
from math import factorial
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(lowerCAmelCase ) // (factorial(lowerCAmelCase ) * factorial(n - k ))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
F"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
F"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
)
| 70
|
'''simple docstring'''
from __future__ import annotations
import math
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
A__ : Optional[Any] =[num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
_lowerCAmelCase = []
for num in range(len(lowerCAmelCase ) ):
_lowerCAmelCase = 0
while 2 * i * i <= odd_composites[num]:
_lowerCAmelCase = odd_composites[num] - 2 * i * i
if is_prime(lowerCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowerCAmelCase ) == n:
return list_nums
return []
def UpperCamelCase__ ( ):
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__lowerCAmelCase : int =["""small""", """medium""", """large"""]
__lowerCAmelCase : List[str] ="""lm_head.decoder.weight"""
__lowerCAmelCase : int ="""lm_head.weight"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
lowercase = torch.load(__a )
lowercase = d.pop(__a )
os.makedirs(__a , exist_ok=__a )
torch.save(__a , os.path.join(__a , __a ) )
if __name__ == "__main__":
__lowerCAmelCase : Dict =argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
__lowerCAmelCase : int =parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__lowerCAmelCase : Optional[Any] =os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
__lowerCAmelCase : Optional[Any] =F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 365
|
"""simple docstring"""
import enum
import shutil
import sys
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
snake_case__ : Tuple = 0
snake_case__ : List[str] = 1
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]="" ) -> Optional[Any]:
'''simple docstring'''
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
forceWrite("""\r""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 32
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''sew'''
def __init__( self : Optional[int] , A : Tuple=32 , A : Union[str, Any]=7_68 , A : Optional[Any]=12 , A : Dict=12 , A : List[Any]=30_72 , A : Optional[int]=2 , A : Optional[int]="gelu" , A : List[Any]=0.1 , A : List[Any]=0.1 , A : Optional[Any]=0.1 , A : Optional[Any]=0.0 , A : Tuple=0.1 , A : Union[str, Any]=0.1 , A : List[Any]=0.0_2 , A : str=1E-5 , A : Tuple="group" , A : Tuple="gelu" , A : int=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , A : Tuple=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A : Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A : List[str]=False , A : Dict=1_28 , A : List[str]=16 , A : List[Any]=True , A : int=0.0_5 , A : Optional[Any]=10 , A : str=2 , A : Optional[Any]=0.0 , A : Union[str, Any]=10 , A : List[Any]=0 , A : Optional[int]="mean" , A : Tuple=False , A : str=False , A : Tuple=2_56 , A : Tuple=0 , A : Optional[int]=1 , A : Optional[int]=2 , **A : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A)
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(A)
_UpperCAmelCase = list(A)
_UpperCAmelCase = list(A)
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim)
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = squeeze_factor
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layerdrop
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
_UpperCAmelCase = mask_feature_min_masks
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# sequence classification
_UpperCAmelCase = use_weighted_layer_sum
_UpperCAmelCase = classifier_proj_size
@property
def _lowerCamelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
| 339
|
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : Matrix ) -> Matrix:
'''simple docstring'''
_UpperCAmelCase = len(_UpperCAmelCase )
_UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for row in range(_UpperCAmelCase ):
for col in range(_UpperCAmelCase ):
_UpperCAmelCase = matrix[row][col]
_UpperCAmelCase = vector[row][0]
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _UpperCAmelCase ):
_UpperCAmelCase = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _UpperCAmelCase ):
for row in range(_UpperCAmelCase ):
_UpperCAmelCase = augmented[row][col] / augmented[col][col]
for cola in range(_UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase )
]
def A ( _UpperCAmelCase : list[int] ) -> Callable[[int], int]:
'''simple docstring'''
_UpperCAmelCase = len(_UpperCAmelCase )
_UpperCAmelCase = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )]
_UpperCAmelCase = [[0] for _ in range(_UpperCAmelCase )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for x_val, y_val in enumerate(_UpperCAmelCase ):
for col in range(_UpperCAmelCase ):
_UpperCAmelCase = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase = y_val
_UpperCAmelCase = solve(_UpperCAmelCase , _UpperCAmelCase )
def interpolated_func(_UpperCAmelCase : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_UpperCAmelCase ) )
return interpolated_func
def A ( _UpperCAmelCase : int ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def A ( _UpperCAmelCase : Callable[[int], int] = question_function , _UpperCAmelCase : int = 10 ) -> int:
'''simple docstring'''
_UpperCAmelCase = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase = 0
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for poly in polynomials:
_UpperCAmelCase = 1
while func(_UpperCAmelCase ) == poly(_UpperCAmelCase ):
x_val += 1
ret += poly(_UpperCAmelCase )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339
| 1
|
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int = 200 ) -> int:
_UpperCAmelCase : Dict = [1, 2, 5, 10, 20, 50, 100, 200]
_UpperCAmelCase : Union[str, Any] = [0] * (pence + 1)
_UpperCAmelCase : Tuple = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 350
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( UpperCAmelCase ):
_lowercase = (PNDMScheduler,)
_lowercase = (("num_inference_steps", 5_0),)
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**A_ )
return config
def _UpperCAmelCase ( self , A_=0 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : int = dict(self.forward_default_kwargs )
_UpperCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : Dict = 0.1 * sample
_UpperCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Dict = self.get_scheduler_config(**A_ )
_UpperCAmelCase : Any = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
new_scheduler.set_timesteps(A_ )
# copy over dummy past residuals
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Any = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Union[str, Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self , A_=0 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Any = dict(self.forward_default_kwargs )
_UpperCAmelCase : int = kwargs.pop("num_inference_steps" , A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : Any = 0.1 * sample
_UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Any = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(A_ )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase : List[Any] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Union[str, Any] = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : List[str] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(**A_ )
_UpperCAmelCase : List[str] = scheduler_class(**A_ )
_UpperCAmelCase : List[str] = 10
_UpperCAmelCase : Optional[int] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(A_ )
for i, t in enumerate(scheduler.prk_timesteps ):
_UpperCAmelCase : Dict = model(A_ , A_ )
_UpperCAmelCase : int = scheduler.step_prk(A_ , A_ , A_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_UpperCAmelCase : Any = model(A_ , A_ )
_UpperCAmelCase : Any = scheduler.step_plms(A_ , A_ , A_ ).prev_sample
return sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_UpperCAmelCase : str = kwargs.pop("num_inference_steps" , A_ )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : str = 0.1 * sample
if num_inference_steps is not None and hasattr(A_ , "set_timesteps" ):
scheduler.set_timesteps(A_ )
elif num_inference_steps is not None and not hasattr(A_ , "set_timesteps" ):
_UpperCAmelCase : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_UpperCAmelCase : Any = dummy_past_residuals[:]
_UpperCAmelCase : Any = scheduler.step_prk(A_ , 0 , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = scheduler.step_prk(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_UpperCAmelCase : Optional[int] = scheduler.step_plms(A_ , 0 , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A_ )
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config(steps_offset=1 )
_UpperCAmelCase : str = scheduler_class(**A_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = 27
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : str = self.dummy_sample
_UpperCAmelCase : str = 0.1 * sample
_UpperCAmelCase : str = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_UpperCAmelCase : Dict = scheduler.step_prk(A_ , A_ , A_ ).prev_sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(A_ ):
_UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_UpperCAmelCase : Dict = self.get_scheduler_config()
_UpperCAmelCase : Union[str, Any] = scheduler_class(**A_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.full_loop()
_UpperCAmelCase : int = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1e-2
assert abs(result_mean.item() - 0.25_80 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1e-2
assert abs(result_mean.item() - 0.08_78 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
_UpperCAmelCase : Dict = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1e-2
assert abs(result_mean.item() - 0.29_95 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
_UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Dict = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1e-2
assert abs(result_mean.item() - 0.24_34 ) < 1e-3
| 189
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline
_lowerCAmelCase : Dict = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
_lowerCAmelCase : Optional[Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
_lowerCAmelCase : Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowerCAmelCase : Optional[int] = False
@property
def _snake_case ( self : List[str] ):
return 32
@property
def _snake_case ( self : Dict ):
return 32
@property
def _snake_case ( self : Any ):
return self.time_input_dim
@property
def _snake_case ( self : Any ):
return self.time_input_dim * 4
@property
def _snake_case ( self : Any ):
return 100
@property
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def _snake_case ( self : int ):
torch.manual_seed(0 )
snake_case_ : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
snake_case_ : List[str] = MultilingualCLIP(lowercase_ )
snake_case_ : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def _snake_case ( self : int ):
torch.manual_seed(0 )
snake_case_ : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
snake_case_ : Tuple = UNetaDConditionModel(**lowercase_ )
return model
@property
def _snake_case ( self : Dict ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self : Any ):
torch.manual_seed(0 )
snake_case_ : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def _snake_case ( self : str ):
snake_case_ : str = self.dummy_text_encoder
snake_case_ : Union[str, Any] = self.dummy_tokenizer
snake_case_ : str = self.dummy_unet
snake_case_ : Dict = self.dummy_movq
snake_case_ : Any = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
snake_case_ : str = DDIMScheduler(**lowercase_ )
snake_case_ : Any = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _snake_case ( self : int , lowercase_ : str , lowercase_ : Any=0 ):
snake_case_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
snake_case_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase_ )
# create init_image
snake_case_ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
snake_case_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Union[str, Any] = Image.fromarray(np.uinta(lowercase_ ) ).convert('''RGB''' ).resize((256, 256) )
if str(lowercase_ ).startswith('''mps''' ):
snake_case_ : Tuple = torch.manual_seed(lowercase_ )
else:
snake_case_ : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
snake_case_ : List[Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _snake_case ( self : List[str] ):
snake_case_ : str = '''cpu'''
snake_case_ : Dict = self.get_dummy_components()
snake_case_ : int = self.pipeline_class(**lowercase_ )
snake_case_ : List[Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ : int = pipe(**self.get_dummy_inputs(lowercase_ ) )
snake_case_ : List[Any] = output.images
snake_case_ : Union[str, Any] = pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
snake_case_ : Tuple = image[0, -3:, -3:, -1]
snake_case_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ : Optional[Any] = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : str ):
snake_case_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
snake_case_ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
snake_case_ : List[str] = '''A red cartoon frog, 4k'''
snake_case_ : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase_ )
snake_case_ : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
snake_case_ : Dict = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
snake_case_ : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case_, snake_case_ : Optional[Any] = pipe_prior(
lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
snake_case_ : Union[str, Any] = pipeline(
lowercase_ , image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
snake_case_ : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
| 264
|
"""simple docstring"""
import numpy as np
def __lowercase ( _a ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264
| 1
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a=None , _a=None ):
"""simple docstring"""
lowerCamelCase = list(poly_a or [0] )[:]
lowerCamelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowerCamelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowerCamelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowerCamelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowerCamelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowerCamelCase = self.__multiply()
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(_a ) <= 1:
return dft[0]
#
lowerCamelCase = self.c_max_length // 2
while next_ncol > 0:
lowerCamelCase = [[] for i in range(_a )]
lowerCamelCase = self.root**next_ncol
# First half of next step
lowerCamelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_a ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowerCamelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_a ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowerCamelCase = new_dft
lowerCamelCase = next_ncol // 2
return dft[0]
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.__dft("""A""" )
lowerCamelCase = self.__dft("""B""" )
lowerCamelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowerCamelCase = 2
while next_ncol <= self.c_max_length:
lowerCamelCase = [[] for i in range(_a )]
lowerCamelCase = self.root ** (next_ncol // 2)
lowerCamelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowerCamelCase = new_inverse_c
next_ncol *= 2
# Unpack
lowerCamelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ):
"""simple docstring"""
lowerCamelCase = """A = """ + """ + """.join(
f'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) )
lowerCamelCase = """B = """ + """ + """.join(
f'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) )
lowerCamelCase = """A*B = """ + """ + """.join(
f'{coef}*x^{i}' for coef, i in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> np.ndarray:
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
lowerCamelCase = ksize + 1
lowerCamelCase = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(snake_case__ ):
for x in range(snake_case__ ):
# distance from center
lowerCamelCase = x - ksize // 2
lowerCamelCase = y - ksize // 2
# degree to radiant
lowerCamelCase = theta / 1_80 * np.pi
lowerCamelCase = np.cos(_theta )
lowerCamelCase = np.sin(_theta )
# get kernel x
lowerCamelCase = cos_theta * px + sin_theta * py
# get kernel y
lowerCamelCase = -sin_theta * px + cos_theta * py
# fill kernel
lowerCamelCase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowerCAmelCase : Optional[Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
lowerCAmelCase : Any = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
lowerCAmelCase : Optional[Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
lowerCAmelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
lowerCAmelCase : Optional[int] = out / out.max() * 255
lowerCAmelCase : Tuple = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 168
| 0
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __A :
def __init__(self : str , __a : Optional[Any] , __a : Union[str, Any]=14 , __a : List[str]=7 , __a : Union[str, Any]=True , __a : int=True , __a : List[str]=True , __a : str=True , __a : str=True , __a : List[str]=99 , __a : Union[str, Any]=32 , __a : Optional[Any]=5 , __a : int=4 , __a : List[Any]=37 , __a : Optional[Any]="gelu" , __a : Tuple=0.1 , __a : Any=0.1 , __a : int=512 , __a : int=16 , __a : Optional[int]=2 , __a : Any=0.02 , __a : str=3 , __a : Optional[Any]=4 , __a : Optional[Any]=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = use_mc_token_ids
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
UpperCAmelCase_ = self.vocab_size - 1
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
if self.use_mc_token_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowercase (self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _lowercase (self : Dict , __a : List[str] , __a : int , __a : Tuple , __a : int , __a : Union[str, Any] , *__a : List[Any] ):
UpperCAmelCase_ = CTRLModel(config=__a )
model.to(__a )
model.eval()
model(__a , token_type_ids=__a , head_mask=__a )
model(__a , token_type_ids=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _lowercase (self : List[Any] , __a : Tuple , __a : Any , __a : Tuple , __a : str , __a : Tuple , *__a : str ):
UpperCAmelCase_ = CTRLLMHeadModel(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def _lowercase (self : Tuple , __a : str , __a : Union[str, Any] , __a : List[str] , __a : Optional[Any] , *__a : Optional[int] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = CTRLForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : Dict = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
a__ : Union[str, Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
a__ : List[Any] = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : Any = True
a__ : Union[str, Any] = False
a__ : Dict = False
def _lowercase (self : List[Any] , __a : int , __a : Optional[int] , __a : Dict , __a : int , __a : List[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _lowercase (self : Any ):
UpperCAmelCase_ = CTRLModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , n_embd=37 )
def _lowercase (self : Union[str, Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self : Optional[int] ):
self.config_tester.run_common_tests()
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase (self : Optional[int] ):
pass
@slow
def _lowercase (self : Union[str, Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = CTRLModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def _lowercase (self : List[str] ):
pass
@require_torch
class __A ( unittest.TestCase ):
def _lowercase (self : Optional[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = CTRLLMHeadModel.from_pretrained("ctrl" )
model.to(__a )
UpperCAmelCase_ = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=__a ) # Legal the president is
UpperCAmelCase_ = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase_ = model.generate(__a , do_sample=__a )
self.assertListEqual(output_ids[0].tolist() , __a )
| 1
|
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K)
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Union[str, Any] , lowerCamelCase_ : bool = True , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase_ : bool = True , lowerCamelCase_ : Union[int, float] = 1 / 255 , lowerCamelCase_ : bool = True , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : bool = True , **lowerCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
UpperCamelCase = size if size is not None else {"""shortest_edge""": 224}
UpperCamelCase = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
UpperCamelCase = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
UpperCamelCase = get_size_dict(lowerCamelCase_ , param_name="""crop_size""" )
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_center_crop
UpperCamelCase = crop_size
UpperCamelCase = do_flip_channel_order
def lowerCamelCase_ ( self : int , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : PILImageResampling = PIL.Image.BILINEAR , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
UpperCamelCase = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCamelCase = get_resize_output_image_size(lowerCamelCase_ , size=size["""shortest_edge"""] , default_to_square=lowerCamelCase_ )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCamelCase_ , size=(size["""height"""], size["""width"""]) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Union[int, float] , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : List[str] , ):
"""simple docstring"""
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None ):
"""simple docstring"""
return flip_channel_order(lowerCamelCase_ , data_format=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : ImageInput , lowerCamelCase_ : bool = None , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : PILImageResampling = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : float = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
UpperCamelCase = crop_size if crop_size is not None else self.crop_size
UpperCamelCase = get_size_dict(lowerCamelCase_ , param_name="""crop_size""" )
UpperCamelCase = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
UpperCamelCase = [self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_ ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
UpperCamelCase = [self.flip_channel_order(image=lowerCamelCase_ ) for image in images]
UpperCamelCase = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
UpperCamelCase = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Tuple] = None ):
"""simple docstring"""
UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCamelCase_ ):
UpperCamelCase = target_sizes.numpy()
UpperCamelCase = []
for idx in range(len(lowerCamelCase_ ) ):
UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowerCamelCase_ )
UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase_ )
else:
UpperCamelCase = logits.argmax(dim=1 )
UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 165
|
import argparse
_SCREAMING_SNAKE_CASE = """docs/source/_static/js/custom.js"""
def lowercase( UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
with open(UpperCamelCase_ , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase = f.readlines()
UpperCamelCase = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
UpperCamelCase = f"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n"""
with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
update_custom_js(args.version)
| 165
| 1
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_a : str = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_a : Union[str, Any] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_a : List[str] = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def __A ( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def __A ( self , a__ , a__ , a__=None , a__="uniform_average" , a__=True ):
_lowerCAmelCase : int = mean_squared_error(
a__ , a__ , sample_weight=a__ , multioutput=a__ , squared=a__ )
return {"mse": mse}
| 44
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: Optional[int] = logging.get_logger(__name__)
A: Optional[int] = torch.device("cpu")
def _snake_case ( ):
UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
def _snake_case ( UpperCamelCase : int ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def _snake_case ( UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : str ):
UpperCAmelCase : int = dct.pop(UpperCamelCase )
UpperCAmelCase : Any = val
def _snake_case ( UpperCamelCase : Union[str, Any] ):
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
UpperCAmelCase : Optional[Any] = k
if ".pwconv" in k:
UpperCAmelCase : int = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
UpperCAmelCase : Tuple = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
UpperCAmelCase : List[Any] = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
UpperCAmelCase : Any = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
UpperCAmelCase : int = k_new.split(""".""" )
if ls[2].isdigit():
UpperCAmelCase : List[Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _snake_case ( UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Optional[int] ):
UpperCAmelCase : List[Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase : Optional[Any] = 1000
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[str] = """imagenet-1k-id2label.json"""
UpperCAmelCase : Dict = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(UpperCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCAmelCase : Any = [3, 3, 6, 4]
UpperCAmelCase : List[str] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
UpperCAmelCase : Dict = [3, 3, 9, 6]
UpperCAmelCase : Union[str, Any] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
UpperCAmelCase : int = [4, 3, 10, 5]
UpperCAmelCase : Optional[int] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
UpperCAmelCase : Union[str, Any] = [4, 4, 12, 6]
UpperCAmelCase : List[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
UpperCAmelCase : List[str] = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="""cpu""" , check_hash=UpperCamelCase )
else:
UpperCAmelCase : Any = torch.load(UpperCamelCase , map_location="""cpu""" )
UpperCAmelCase : Optional[Any] = checkpoint
UpperCAmelCase : Dict = create_rename_keys(UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# load HuggingFace model
UpperCAmelCase : List[Any] = SwiftFormerForImageClassification(UpperCamelCase ).eval()
hf_model.load_state_dict(UpperCamelCase )
# prepare test inputs
UpperCAmelCase : Dict = prepare_img()
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
UpperCAmelCase : Optional[int] = processor(images=UpperCamelCase , return_tensors="""pt""" )
# compare outputs from both models
UpperCAmelCase : Optional[int] = get_expected_output(UpperCamelCase )
UpperCAmelCase : List[str] = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase , atol=1e-3 )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
A: str = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 109
| 0
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
UpperCamelCase__ =[
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
for attribute in key.split("." ):
_SCREAMING_SNAKE_CASE : List[str] = getattr(__lowerCamelCase, __lowerCamelCase )
if weight_type is not None:
_SCREAMING_SNAKE_CASE : str = getattr(__lowerCamelCase, __lowerCamelCase ).shape
else:
_SCREAMING_SNAKE_CASE : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_SCREAMING_SNAKE_CASE : Optional[Any] = value
elif weight_type == "weight_g":
_SCREAMING_SNAKE_CASE : Union[str, Any] = value
elif weight_type == "weight_v":
_SCREAMING_SNAKE_CASE : Union[str, Any] = value
elif weight_type == "bias":
_SCREAMING_SNAKE_CASE : Dict = value
else:
_SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.state_dict()
_SCREAMING_SNAKE_CASE : Optional[int] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_SCREAMING_SNAKE_CASE : List[str] = None
for name, value in fairseq_dict.items():
_SCREAMING_SNAKE_CASE : Any = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, hf_model.config.feat_extract_norm == "group", )
_SCREAMING_SNAKE_CASE : Optional[Any] = True
elif name.split("." )[0] == "proj":
_SCREAMING_SNAKE_CASE : Optional[int] = fairseq_model.proj
_SCREAMING_SNAKE_CASE : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_SCREAMING_SNAKE_CASE : List[Any] = True
if "*" in mapped_key:
_SCREAMING_SNAKE_CASE : int = name.split(__lowerCamelCase )[0].split("." )[-2]
_SCREAMING_SNAKE_CASE : str = mapped_key.replace("*", __lowerCamelCase )
if "weight_g" in name:
_SCREAMING_SNAKE_CASE : List[Any] = "weight_g"
elif "weight_v" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = "weight_v"
elif "bias" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = "bias"
elif "weight" in name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = "weight"
else:
_SCREAMING_SNAKE_CASE : List[Any] = None
set_recursively(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = full_name.split("conv_layers." )[-1]
_SCREAMING_SNAKE_CASE : Optional[int] = name.split("." )
_SCREAMING_SNAKE_CASE : str = int(items[0] )
_SCREAMING_SNAKE_CASE : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_SCREAMING_SNAKE_CASE : str = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_SCREAMING_SNAKE_CASE : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_SCREAMING_SNAKE_CASE : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = emb.weight.shape
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase, __lowerCamelCase, bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = emb.weight.data
return lin_layer
def lowerCamelCase__ (__lowerCamelCase ):
with open(__lowerCamelCase, "r", encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE : List[Any] = f.readlines()
_SCREAMING_SNAKE_CASE : List[str] = [line.split(" " )[0] for line in lines]
_SCREAMING_SNAKE_CASE : Optional[Any] = len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(__lowerCamelCase, range(4, num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ):
_SCREAMING_SNAKE_CASE : List[Any] = WavaVecaConfig.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = SpeechaTextaConfig.from_pretrained(
__lowerCamelCase, vocab_size=__lowerCamelCase, decoder_layers=__lowerCamelCase, do_stable_layer_norm=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=__lowerCamelCase, return_attention_mask=__lowerCamelCase, )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
_SCREAMING_SNAKE_CASE : str = model[0].eval()
# set weights for wav2vec2 encoder
_SCREAMING_SNAKE_CASE : Dict = WavaVecaModel(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = recursively_load_weights_wavaveca(model.encoder, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = SpeechaTextaForCausalLM(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=__lowerCamelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
_SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
_SCREAMING_SNAKE_CASE : Dict = SpeechEncoderDecoderModel(encoder=__lowerCamelCase, decoder=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = False
# add projection layer
_SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(projection_layer.weight )
_SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(projection_layer.bias )
_SCREAMING_SNAKE_CASE : List[Any] = create_vocab_dict(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase, "vocab.json" ), "w" ) as fp:
json.dump(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = SpeechaTextaTokenizer(os.path.join(__lowerCamelCase, "vocab.json" ) )
tokenizer.save_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = hf_wavavec.config.to_dict()
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.pad_token_id
_SCREAMING_SNAKE_CASE : str = tokenizer.bos_token_id
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.eos_token_id
_SCREAMING_SNAKE_CASE : List[str] = "speech_to_text_2"
_SCREAMING_SNAKE_CASE : Tuple = "wav2vec2"
_SCREAMING_SNAKE_CASE : List[Any] = SpeechEncoderDecoderConfig.from_dict(__lowerCamelCase )
hf_wavavec.save_pretrained(__lowerCamelCase )
feature_extractor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
UpperCamelCase__ =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 325
|
from math import factorial
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(__lowerCamelCase ) // (factorial(__lowerCamelCase ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
| 325
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
# Initialise PyTorch model
snake_case_ = LxmertConfig.from_json_file(UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
snake_case_ = LxmertForPreTraining(UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 69
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__UpperCamelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
for attribute in key.split('.' ):
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "running_mean":
snake_case_ = value
elif weight_type == "running_var":
snake_case_ = value
elif weight_type == "num_batches_tracked":
snake_case_ = value
elif weight_type == "inv_freq":
snake_case_ = value
else:
snake_case_ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
snake_case_ = True
else:
for key, mapped_key in MAPPING.items():
snake_case_ = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(UpperCAmelCase )[0].split('.' )[-2]
snake_case_ = mapped_key.replace('*' , UpperCAmelCase )
if "pos_bias_u" in name:
snake_case_ = None
elif "pos_bias_v" in name:
snake_case_ = None
elif "weight_g" in name:
snake_case_ = 'weight_g'
elif "weight_v" in name:
snake_case_ = 'weight_v'
elif "bias" in name:
snake_case_ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = 'weight'
elif "running_mean" in name:
snake_case_ = 'running_mean'
elif "inv_freq" in name:
snake_case_ = 'inv_freq'
elif "running_var" in name:
snake_case_ = 'running_var'
elif "num_batches_tracked" in name:
snake_case_ = 'num_batches_tracked'
else:
snake_case_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = full_name.split('conv_layers.' )[-1]
snake_case_ = name.split('.' )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase )
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True ) -> str:
if config_path is not None:
snake_case_ = WavaVecaConformerConfig.from_pretrained(UpperCAmelCase , hidden_act='swish' )
else:
snake_case_ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
snake_case_ = 'rotary'
if is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(UpperCAmelCase , 'vocab.json' )
if not os.path.isdir(UpperCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase ) )
return
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(UpperCAmelCase , UpperCAmelCase )
snake_case_ = WavaVecaCTCTokenizer(
UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase , )
snake_case_ = True if config.feat_extract_norm == 'layer' else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
snake_case_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
snake_case_ = WavaVecaConformerForCTC(UpperCAmelCase )
else:
snake_case_ = WavaVecaConformerForPreTraining(UpperCAmelCase )
if is_finetuned:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task='audio_pretraining' )
snake_case_ = fairseq.tasks.setup_task(UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase )
snake_case_ = model[0].eval()
recursively_load_weights(UpperCAmelCase , UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 69
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
_lowerCamelCase = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_lowerCamelCase = 'UperNetConfig'
class a ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : Union[int, Tuple[int, int]] , __snake_case : Union[int, Tuple[int, int], str] = 0 , __snake_case : bool = False , __snake_case : Union[int, Tuple[int, int]] = 1 , ):
super().__init__()
UpperCAmelCase_ = nn.Convad(
in_channels=__snake_case , out_channels=__snake_case , kernel_size=__snake_case , padding=__snake_case , bias=__snake_case , dilation=__snake_case , )
UpperCAmelCase_ = nn.BatchNormad(__snake_case )
UpperCAmelCase_ = nn.ReLU()
def lowerCamelCase_ ( self : int , __snake_case : torch.Tensor ):
UpperCAmelCase_ = self.conv(__snake_case )
UpperCAmelCase_ = self.batch_norm(__snake_case )
UpperCAmelCase_ = self.activation(__snake_case )
return output
class a ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int ):
super().__init__()
UpperCAmelCase_ = [
nn.AdaptiveAvgPoolad(__snake_case ),
UperNetConvModule(__snake_case , __snake_case , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__snake_case ) , __snake_case )
def lowerCamelCase_ ( self : List[Any] , __snake_case : torch.Tensor ):
UpperCAmelCase_ = input
for layer in self.layers:
UpperCAmelCase_ = layer(__snake_case )
return hidden_state
class a ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , __snake_case : Tuple[int, ...] , __snake_case : int , __snake_case : int , __snake_case : bool ):
super().__init__()
UpperCAmelCase_ = pool_scales
UpperCAmelCase_ = align_corners
UpperCAmelCase_ = in_channels
UpperCAmelCase_ = channels
UpperCAmelCase_ = []
for i, pool_scale in enumerate(__snake_case ):
UpperCAmelCase_ = UperNetPyramidPoolingBlock(pool_scale=__snake_case , in_channels=__snake_case , channels=__snake_case )
self.blocks.append(__snake_case )
self.add_module(str(__snake_case ) , __snake_case )
def lowerCamelCase_ ( self : List[str] , __snake_case : torch.Tensor ):
UpperCAmelCase_ = []
for ppm in self.blocks:
UpperCAmelCase_ = ppm(__snake_case )
UpperCAmelCase_ = nn.functional.interpolate(
__snake_case , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(__snake_case )
return ppm_outs
class a ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __snake_case : Dict , __snake_case : List[str] ):
super().__init__()
UpperCAmelCase_ = config
UpperCAmelCase_ = config.pool_scales # e.g. (1, 2, 3, 6)
UpperCAmelCase_ = in_channels
UpperCAmelCase_ = config.hidden_size
UpperCAmelCase_ = False
UpperCAmelCase_ = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
UpperCAmelCase_ = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
UpperCAmelCase_ = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
UpperCAmelCase_ = nn.ModuleList()
UpperCAmelCase_ = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
UpperCAmelCase_ = UperNetConvModule(__snake_case , self.channels , kernel_size=1 )
UpperCAmelCase_ = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__snake_case )
self.fpn_convs.append(__snake_case )
UpperCAmelCase_ = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def lowerCamelCase_ ( self : Tuple ):
self.apply(self._init_weights )
def lowerCamelCase_ ( self : List[Any] , __snake_case : List[Any] ):
if isinstance(__snake_case , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowerCamelCase_ ( self : List[Any] , __snake_case : int ):
UpperCAmelCase_ = inputs[-1]
UpperCAmelCase_ = [x]
psp_outs.extend(self.psp_modules(__snake_case ) )
UpperCAmelCase_ = torch.cat(__snake_case , dim=1 )
UpperCAmelCase_ = self.bottleneck(__snake_case )
return output
def lowerCamelCase_ ( self : str , __snake_case : torch.Tensor ):
# build laterals
UpperCAmelCase_ = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__snake_case ) )
# build top-down path
UpperCAmelCase_ = len(__snake_case )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCAmelCase_ = laterals[i - 1].shape[2:]
UpperCAmelCase_ = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__snake_case , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
UpperCAmelCase_ = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCAmelCase_ = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
UpperCAmelCase_ = torch.cat(__snake_case , dim=1 )
UpperCAmelCase_ = self.fpn_bottleneck(__snake_case )
UpperCAmelCase_ = self.classifier(__snake_case )
return output
class a ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , __snake_case : int , __snake_case : int = 2 , __snake_case : int = 3 , __snake_case : Union[int, Tuple[int, int]] = 1 ):
super().__init__()
UpperCAmelCase_ = config
UpperCAmelCase_ = config.auxiliary_in_channels
UpperCAmelCase_ = config.auxiliary_channels
UpperCAmelCase_ = config.auxiliary_num_convs
UpperCAmelCase_ = config.auxiliary_concat_input
UpperCAmelCase_ = in_index
UpperCAmelCase_ = (kernel_size // 2) * dilation
UpperCAmelCase_ = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__snake_case , padding=__snake_case , dilation=__snake_case ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__snake_case , padding=__snake_case , dilation=__snake_case ) )
if self.num_convs == 0:
UpperCAmelCase_ = nn.Identity()
else:
UpperCAmelCase_ = nn.Sequential(*__snake_case )
if self.concat_input:
UpperCAmelCase_ = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__snake_case , padding=kernel_size // 2 )
UpperCAmelCase_ = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def lowerCamelCase_ ( self : Tuple ):
self.apply(self._init_weights )
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : Tuple ):
if isinstance(__snake_case , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowerCamelCase_ ( self : List[str] , __snake_case : torch.Tensor ):
# just take the relevant feature maps
UpperCAmelCase_ = encoder_hidden_states[self.in_index]
UpperCAmelCase_ = self.convs(__snake_case )
if self.concat_input:
UpperCAmelCase_ = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
UpperCAmelCase_ = self.classifier(__snake_case )
return output
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : Tuple = UperNetConfig
lowerCAmelCase : Tuple = 'pixel_values'
lowerCAmelCase : Tuple = True
def lowerCamelCase_ ( self : List[str] , __snake_case : int ):
if isinstance(__snake_case , __snake_case ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def lowerCamelCase_ ( self : List[str] ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : Dict , __snake_case : int=False ):
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ = value
_lowerCamelCase = r'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_lowerCamelCase = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , _A , )
class a ( _A ):
'''simple docstring'''
def __init__( self : Dict , __snake_case : Any ):
super().__init__(__snake_case )
UpperCAmelCase_ = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
UpperCAmelCase_ = UperNetHead(__snake_case , in_channels=self.backbone.channels )
UpperCAmelCase_ = UperNetFCNHead(__snake_case ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=__snake_case , config_class=_CONFIG_FOR_DOC )
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[bool] = None , ):
UpperCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ = output_attentions if output_attentions is not None else self.config.output_attentions
UpperCAmelCase_ = self.backbone.forward_with_filtered_kwargs(
__snake_case , output_hidden_states=__snake_case , output_attentions=__snake_case )
UpperCAmelCase_ = outputs.feature_maps
UpperCAmelCase_ = self.decode_head(__snake_case )
UpperCAmelCase_ = nn.functional.interpolate(__snake_case , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__snake_case )
UpperCAmelCase_ = None
if self.auxiliary_head is not None:
UpperCAmelCase_ = self.auxiliary_head(__snake_case )
UpperCAmelCase_ = nn.functional.interpolate(
__snake_case , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__snake_case )
UpperCAmelCase_ = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
UpperCAmelCase_ = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
UpperCAmelCase_ = loss_fct(__snake_case , __snake_case )
UpperCAmelCase_ = loss_fct(__snake_case , __snake_case )
UpperCAmelCase_ = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
UpperCAmelCase_ = (logits,) + outputs[1:]
else:
UpperCAmelCase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 350
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCamelCase = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['ConditionalDetrFeatureExtractor']
_lowerCamelCase = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 177
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class a__ ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE : str = ["pixel_values"]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
_lowercase : Optional[int] = size if size is not None else {'''shortest_edge''': 256}
_lowercase : Any = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_lowercase : Optional[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_lowercase : str = get_size_dict(_UpperCAmelCase )
_lowercase : int = do_resize
_lowercase : str = size
_lowercase : Dict = resample
_lowercase : Dict = do_center_crop
_lowercase : List[Any] = crop_size
_lowercase : str = do_rescale
_lowercase : str = rescale_factor
_lowercase : List[str] = do_normalize
_lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : str = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowercase : List[str] = get_resize_output_image_size(_UpperCAmelCase , size=size["shortest_edge"] , default_to_square=_UpperCAmelCase )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : List[Any] = get_size_dict(_UpperCAmelCase )
return center_crop(_UpperCAmelCase , size=(size["height"], size["width"]) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : Optional[Any] = size if size is not None else self.size
_lowercase : Tuple = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_lowercase : Any = resample if resample is not None else self.resample
_lowercase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : int = crop_size if crop_size is not None else self.crop_size
_lowercase : Optional[int] = get_size_dict(_UpperCAmelCase )
_lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Dict = image_mean if image_mean is not None else self.image_mean
_lowercase : List[Any] = image_std if image_std is not None else self.image_std
_lowercase : Dict = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowercase : List[Any] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
_lowercase : int = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
_lowercase : Optional[Any] = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images]
if do_rescale:
_lowercase : Dict = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
_lowercase : List[str] = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
_lowercase : int = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
_lowercase : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 250
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
if n == 1 or not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return 0
elif n == 2:
return 1
else:
lowercase__: List[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
lowercase__: Union[str, Any] = 0
lowercase__: List[Any] = 2
while digits < n:
index += 1
lowercase__: Dict = len(str(fibonacci(__UpperCAmelCase ) ) )
return index
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 1_0_0_0 ) -> int:
return fibonacci_digits_index(__UpperCAmelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 177
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCamelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =state_dict.pop(_lowerCAmelCase )
__lowercase =val
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__lowercase =key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
__lowercase =value
else:
__lowercase =value
return new_state_dict
def _A ( _lowerCAmelCase , _lowerCAmelCase=False ):
"""simple docstring"""
__lowercase =''
if is_panoptic:
__lowercase ='conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowercase =state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
__lowercase =state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowercase =in_proj_weight[:256, :]
__lowercase =in_proj_bias[:256]
__lowercase =in_proj_weight[256:512, :]
__lowercase =in_proj_bias[256:512]
__lowercase =in_proj_weight[-256:, :]
__lowercase =in_proj_bias[-256:]
def _A ( ):
"""simple docstring"""
__lowercase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase =Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__lowercase ='resnet101'
if "dc5" in model_name:
__lowercase =True
__lowercase ='panoptic' in model_name
if is_panoptic:
__lowercase =250
else:
__lowercase =91
__lowercase ='huggingface/label-files'
__lowercase ='coco-detection-id2label.json'
__lowercase =json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
__lowercase ={int(_lowerCAmelCase ): v for k, v in idalabel.items()}
__lowercase =idalabel
__lowercase ={v: k for k, v in idalabel.items()}
# load image processor
__lowercase ='coco_panoptic' if is_panoptic else 'coco_detection'
__lowercase =ConditionalDetrImageProcessor(format=_lowerCAmelCase )
# prepare image
__lowercase =prepare_img()
__lowercase =image_processor(images=_lowerCAmelCase , return_tensors='pt' )
__lowercase =encoding['pixel_values']
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
__lowercase =torch.hub.load('DeppMeng/ConditionalDETR' , _lowerCAmelCase , pretrained=_lowerCAmelCase ).eval()
__lowercase =conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__lowercase ='conditional_detr.' + src
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase =rename_backbone_keys(_lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCAmelCase , is_panoptic=_lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowercase ='conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
__lowercase =state_dict.pop(_lowerCAmelCase )
__lowercase =val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowercase =state_dict.pop(_lowerCAmelCase )
__lowercase =val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
__lowercase =state_dict.pop(_lowerCAmelCase )
__lowercase =val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
__lowercase =state_dict.pop(_lowerCAmelCase )
__lowercase =val
# finally, create HuggingFace model and load state dict
__lowercase =ConditionalDetrForSegmentation(_lowerCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
model.push_to_hub(repo_id=_lowerCAmelCase , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
__lowercase =conditional_detr(_lowerCAmelCase )
__lowercase =model(_lowerCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCamelCase = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 48
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class _UpperCamelCase ( A , A ):
'''simple docstring'''
lowerCAmelCase__ = """resnet"""
lowerCAmelCase__ = ["""basic""", """bottleneck"""]
def __init__( self : Any , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Optional[int]=6_4 , _lowerCAmelCase : str=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _lowerCAmelCase : Any=[3, 4, 6, 3] , _lowerCAmelCase : List[Any]="bottleneck" , _lowerCAmelCase : List[str]="relu" , _lowerCAmelCase : int=False , _lowerCAmelCase : int=None , _lowerCAmelCase : Any=None , **_lowerCAmelCase : Any , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase)
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
__lowercase =num_channels
__lowercase =embedding_size
__lowercase =hidden_sizes
__lowercase =depths
__lowercase =layer_type
__lowercase =hidden_act
__lowercase =downsample_in_first_stage
__lowercase =['stem'] + [f"""stage{idx}""" for idx in range(1 , len(_lowerCAmelCase) + 1)]
__lowercase , __lowercase =get_aligned_output_features_output_indices(
out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names)
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
return 1e-3
| 48
| 1
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def SCREAMING_SNAKE_CASE__ ( __A , __A = "cpu" , __A = None ) -> None:
_snake_case = torch.load(_snake_case , map_location=_snake_case )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_snake_case , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
_snake_case = v.half()
if save_path is None: # overwrite src_path
_snake_case = src_path
torch.save(_snake_case , _snake_case )
if __name__ == "__main__":
fire.Fire(convert)
| 42
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =ShapEPipeline
lowerCamelCase__ =['prompt']
lowerCamelCase__ =['prompt']
lowerCamelCase__ =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowerCamelCase__ =False
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 8
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__snake_case : Optional[int] = PriorTransformer(**a_ )
return model
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__snake_case : List[Any] = ShapERenderer(**a_ )
return model
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.dummy_prior
__snake_case : str = self.dummy_text_encoder
__snake_case : str = self.dummy_tokenizer
__snake_case : Tuple = self.dummy_renderer
__snake_case : int = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=a_ , clip_sample=a_ , clip_sample_range=1.0 , )
__snake_case : Union[str, Any] = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def SCREAMING_SNAKE_CASE (self , a_ , a_=0 ):
'''simple docstring'''
if str(a_ ).startswith('''mps''' ):
__snake_case : Tuple = torch.manual_seed(a_ )
else:
__snake_case : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case : Optional[int] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = '''cpu'''
__snake_case : str = self.get_dummy_components()
__snake_case : List[Any] = self.pipeline_class(**a_ )
__snake_case : str = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(a_ ) )
__snake_case : List[str] = output.images[0]
__snake_case : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = torch_device == '''cpu'''
__snake_case : Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a_ , relax_max_difference=a_ , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.get_dummy_components()
__snake_case : int = self.pipeline_class(**a_ )
__snake_case : int = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = 1
__snake_case : List[Any] = 2
__snake_case : int = self.get_dummy_inputs(a_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Dict = batch_size * [inputs[key]]
__snake_case : Union[str, Any] = pipe(**a_ , num_images_per_prompt=a_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
__snake_case : Optional[Any] = ShapEPipeline.from_pretrained('''openai/shap-e''' )
__snake_case : Optional[int] = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case : str = pipe(
'''a shark''' , generator=a_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a_ , a_ )
| 102
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Dict = "Salesforce/blip-image-captioning-base"
__lowerCamelCase : Any = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
__lowerCamelCase : List[str] = "image_captioner"
__lowerCamelCase : Optional[int] = AutoModelForVisionaSeq
__lowerCamelCase : Any = ["image"]
__lowerCamelCase : Union[str, Any] = ["text"]
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
requires_backends(self, ["""vision"""] )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.pre_processor(images=lowerCamelCase__, return_tensors="""pt""" )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.model.generate(**lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.pre_processor.batch_decode(lowerCamelCase__, skip_special_tokens=lowerCamelCase__ )[0].strip()
| 115
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Union[str, Any] = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Dict = "conditional_detr"
__lowerCamelCase : str = ["past_key_values"]
__lowerCamelCase : str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self, lowerCamelCase__=True, lowerCamelCase__=None, lowerCamelCase__=3, lowerCamelCase__=300, lowerCamelCase__=6, lowerCamelCase__=2048, lowerCamelCase__=8, lowerCamelCase__=6, lowerCamelCase__=2048, lowerCamelCase__=8, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=True, lowerCamelCase__="relu", lowerCamelCase__=256, lowerCamelCase__=0.1, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=0.02, lowerCamelCase__=1.0, lowerCamelCase__=False, lowerCamelCase__="sine", lowerCamelCase__="resnet50", lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=2, lowerCamelCase__=5, lowerCamelCase__=2, lowerCamelCase__=1, lowerCamelCase__=1, lowerCamelCase__=2, lowerCamelCase__=5, lowerCamelCase__=2, lowerCamelCase__=0.25, **lowerCamelCase__, ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Any = backbone_config.get("""model_type""" )
A : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
A : Tuple = config_class.from_dict(lowerCamelCase__ )
A : Dict = use_timm_backbone
A : int = backbone_config
A : Union[str, Any] = num_channels
A : Optional[Any] = num_queries
A : Union[str, Any] = d_model
A : str = encoder_ffn_dim
A : List[Any] = encoder_layers
A : Tuple = encoder_attention_heads
A : Union[str, Any] = decoder_ffn_dim
A : Tuple = decoder_layers
A : int = decoder_attention_heads
A : Union[str, Any] = dropout
A : List[str] = attention_dropout
A : Optional[int] = activation_dropout
A : Optional[Any] = activation_function
A : Any = init_std
A : List[Any] = init_xavier_std
A : Any = encoder_layerdrop
A : List[str] = decoder_layerdrop
A : int = encoder_layers
A : Union[str, Any] = auxiliary_loss
A : Union[str, Any] = position_embedding_type
A : Tuple = backbone
A : Dict = use_pretrained_backbone
A : int = dilation
# Hungarian matcher
A : List[Any] = class_cost
A : List[Any] = bbox_cost
A : int = giou_cost
# Loss coefficients
A : List[Any] = mask_loss_coefficient
A : Any = dice_loss_coefficient
A : int = cls_loss_coefficient
A : Tuple = bbox_loss_coefficient
A : List[Any] = giou_loss_coefficient
A : int = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase__, **lowerCamelCase__ )
@property
def _lowerCAmelCase ( self ):
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self ):
return self.d_model
def _lowerCAmelCase ( self ):
A : Dict = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A : List[Any] = self.backbone_config.to_dict()
A : List[str] = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Tuple = version.parse("1.11" )
@property
def _lowerCAmelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _lowerCAmelCase ( self ):
return 1e-5
@property
def _lowerCAmelCase ( self ):
return 12
| 115
| 1
|
'''simple docstring'''
def UpperCAmelCase_ ( __lowerCamelCase : Optional[Any] = 10**12 ):
lowercase_ :Tuple = 1
lowercase_ :List[str] = 0
lowercase_ :List[str] = 1
lowercase_ :List[Any] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F'''{solution() = }''')
| 223
|
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1
lowercase : List[Any] = n
lowercase : List[Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # adjacency matrix for weight
lowercase : Union[str, Any] = [
[math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = w
def __lowerCamelCase ( self ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.dp[u][v]
if __name__ == "__main__":
__a = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 337
| 0
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = ['''model.decoder.embed_positions.weights''']
def __UpperCamelCase ( _A ):
if "emb" in name:
lowerCAmelCase_ = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
lowerCAmelCase_ = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
lowerCAmelCase_ = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
lowerCAmelCase_ = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
lowerCAmelCase_ = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
lowerCAmelCase_ = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
lowerCAmelCase_ = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
lowerCAmelCase_ = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
lowerCAmelCase_ = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
lowerCAmelCase_ = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
lowerCAmelCase_ = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def __UpperCamelCase ( _A , _A ):
lowerCAmelCase_ = list(state_dict.keys() )
lowerCAmelCase_ = {}
for key in keys:
lowerCAmelCase_ = state_dict.pop(_A )
lowerCAmelCase_ = rename_keys(_A )
if "in_proj_weight" in key:
# split fused qkv proj
lowerCAmelCase_ = val[:hidden_size, :]
lowerCAmelCase_ = val[hidden_size : 2 * hidden_size, :]
lowerCAmelCase_ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowerCAmelCase_ = val
else:
lowerCAmelCase_ = val
return state_dict, enc_dec_proj_state_dict
def __UpperCamelCase ( _A ):
if checkpoint == "small":
# default config values
lowerCAmelCase_ = 1024
lowerCAmelCase_ = 24
lowerCAmelCase_ = 16
elif checkpoint == "medium":
lowerCAmelCase_ = 1536
lowerCAmelCase_ = 48
lowerCAmelCase_ = 24
elif checkpoint == "large":
lowerCAmelCase_ = 2048
lowerCAmelCase_ = 48
lowerCAmelCase_ = 32
else:
raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
lowerCAmelCase_ = MusicgenDecoderConfig(
hidden_size=_A , ffn_dim=hidden_size * 4 , num_hidden_layers=_A , num_attention_heads=_A , )
return config
@torch.no_grad()
def __UpperCamelCase ( _A , _A=None , _A=None , _A="cpu" ):
lowerCAmelCase_ = MusicGen.get_pretrained(_A , device=_A )
lowerCAmelCase_ = decoder_config_from_checkpoint(_A )
lowerCAmelCase_ = fairseq_model.lm.state_dict()
lowerCAmelCase_ , lowerCAmelCase_ = rename_state_dict(
_A , hidden_size=decoder_config.hidden_size )
lowerCAmelCase_ = TaEncoderModel.from_pretrained('''t5-base''' )
lowerCAmelCase_ = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
lowerCAmelCase_ = MusicgenForCausalLM(_A ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowerCAmelCase_ , lowerCAmelCase_ = decoder.load_state_dict(_A , strict=_A )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_A )
if len(_A ) > 0:
raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
if len(_A ) > 0:
raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
lowerCAmelCase_ = MusicgenForConditionalGeneration(text_encoder=_A , audio_encoder=_A , decoder=_A )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_A )
# check we can do a forward pass
lowerCAmelCase_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowerCAmelCase_ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowerCAmelCase_ = model(input_ids=_A , decoder_input_ids=_A ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''t5-base''' )
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
lowerCAmelCase_ = MusicgenProcessor(feature_extractor=_A , tokenizer=_A )
# set the appropriate bos/pad token ids
lowerCAmelCase_ = 2048
lowerCAmelCase_ = 2048
# set other default generation config params
lowerCAmelCase_ = int(30 * audio_encoder.config.frame_rate )
lowerCAmelCase_ = True
lowerCAmelCase_ = 3.0
if pytorch_dump_folder is not None:
Path(_A ).mkdir(exist_ok=_A )
logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(_A )
processor.save_pretrained(_A )
if repo_id:
logger.info(f"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(_A )
processor.push_to_hub(_A )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
_A = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 167
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_A = logging.get_logger(__name__)
_A = TypeVar('''DatasetType''', Dataset, IterableDataset)
def __UpperCamelCase ( _A , _A = None , _A = None , _A = None , _A = None , _A = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_A ):
if not isinstance(_A , (Dataset, IterableDataset) ):
if isinstance(_A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(_A )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_A ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}." )
if i == 0:
lowerCAmelCase_ , lowerCAmelCase_ = (
(Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset)
)
elif not isinstance(_A , _A ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_A , _A , _A , info=_A , split=_A , stopping_strategy=_A )
else:
return _interleave_iterable_datasets(
_A , _A , _A , info=_A , split=_A , stopping_strategy=_A )
def __UpperCamelCase ( _A , _A = None , _A = None , _A = 0 , ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_A ):
if not isinstance(_A , (Dataset, IterableDataset) ):
if isinstance(_A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(_A )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_A ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}." )
if i == 0:
lowerCAmelCase_ , lowerCAmelCase_ = (
(Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset)
)
elif not isinstance(_A , _A ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_A , info=_A , split=_A , axis=_A )
else:
return _concatenate_iterable_datasets(_A , info=_A , split=_A , axis=_A )
| 167
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=lowercase ):
"""simple docstring"""
__UpperCAmelCase : int = ["keras_nlp"]
def __init__( self : str, *UpperCAmelCase__ : Dict, **UpperCAmelCase__ : Union[str, Any] ):
requires_backends(self, ["keras_nlp"] )
| 17
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a_ = '''src/transformers'''
a_ = '''docs/source/en/tasks'''
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
# Find the start prompt.
lowerCAmelCase__ = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase__ = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(TRANSFORMERS_PATH)
a_ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a_ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def _a ( UpperCamelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = TASK_GUIDE_TO_MODELS[task_guide]
lowerCAmelCase__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase_ , set() )
lowerCAmelCase__ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=False ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _find_text_in_file(
filename=os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
lowerCAmelCase__ = get_model_list_for_task(UpperCamelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
a_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Union[str, Any] , _A : List[Any]=None , **_A : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
snake_case_ : List[Any] = model
snake_case_ : Tuple = kwargs.get('model_save_dir' , _lowerCAmelCase )
snake_case_ : Optional[Any] = kwargs.get('latest_model_name' , _lowerCAmelCase )
def __call__( self : List[Any] , **_A : Optional[int] ) -> Dict:
"""simple docstring"""
snake_case_ : Any = {k: np.array(_lowerCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_lowerCAmelCase , _lowerCAmelCase )
@staticmethod
def UpperCAmelCase_ ( _A : Union[str, Path] , _A : List[Any]=None , _A : Any=None ) -> Any:
"""simple docstring"""
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
snake_case_ : Union[str, Any] = """CPUExecutionProvider"""
return ort.InferenceSession(_lowerCAmelCase , providers=[provider] , sess_options=_lowerCAmelCase )
def UpperCAmelCase_ ( self : Dict , _A : Union[str, Path] , _A : Optional[str] = None , **_A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
snake_case_ : Dict = file_name if file_name is not None else ONNX_WEIGHTS_NAME
snake_case_ : List[Any] = self.model_save_dir.joinpath(self.latest_model_name )
snake_case_ : Any = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
snake_case_ : List[str] = self.model_save_dir.joinpath(_lowerCAmelCase )
if src_path.exists():
snake_case_ : List[str] = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
def UpperCAmelCase_ ( self : Optional[int] , _A : Union[str, os.PathLike] , **_A : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
if os.path.isfile(_lowerCAmelCase ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
# saving model weights/files
self._save_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , _A : Union[str, Path] , _A : Optional[Union[bool, str, None]] = None , _A : Optional[Union[str, None]] = None , _A : bool = False , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional["ort.SessionOptions"] = None , **_A : int , ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_lowerCAmelCase ):
snake_case_ : str = OnnxRuntimeModel.load_model(
os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
snake_case_ : Dict = Path(_lowerCAmelCase )
# load model from hub
else:
# download model
snake_case_ : List[Any] = hf_hub_download(
repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , )
snake_case_ : int = Path(_lowerCAmelCase ).parent
snake_case_ : Union[str, Any] = Path(_lowerCAmelCase ).name
snake_case_ : str = OnnxRuntimeModel.load_model(_lowerCAmelCase , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
return cls(model=_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def UpperCAmelCase_ ( cls : Optional[int] , _A : Union[str, Path] , _A : bool = True , _A : Optional[str] = None , _A : Optional[str] = None , **_A : int , ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : List[Any] = None
if len(str(_lowerCAmelCase ).split('@' ) ) == 2:
snake_case_ : List[Any] = model_id.split('@' )
return cls._from_pretrained(
model_id=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , **_lowerCAmelCase , )
| 370
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ ):
@register_to_config
def __init__( self : Optional[Any] , *,
_A : int = 4 , _A : int = 768 , _A : int , _A : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__()
snake_case_ : int = nn.Parameter(torch.zeros(_A ) )
# parameters for additional clip time embeddings
snake_case_ : Tuple = nn.Linear(_A , _A )
snake_case_ : List[Any] = nn.Linear(_A , _A )
# parameters for encoder hidden states
snake_case_ : Union[str, Any] = clip_extra_context_tokens
snake_case_ : str = nn.Linear(
_A , self.clip_extra_context_tokens * cross_attention_dim )
snake_case_ : Any = nn.Linear(_A , _A )
snake_case_ : Tuple = nn.LayerNorm(_A )
def UpperCAmelCase_ ( self : List[str] , *, _A : Tuple , _A : List[Any] , _A : str , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
snake_case_ : Optional[int] = image_embeddings.shape[0]
snake_case_ : Optional[Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
snake_case_ : Optional[Any] = classifier_free_guidance_embeddings.expand(
_A , -1 )
snake_case_ : Any = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
snake_case_ : str = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
snake_case_ : str = self.embedding_proj(_A )
snake_case_ : Dict = self.clip_image_embeddings_project_to_time_embeddings(_A )
snake_case_ : Tuple = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
snake_case_ : List[str] = self.clip_extra_context_tokens_proj(_A )
snake_case_ : Optional[Any] = clip_extra_context_tokens.reshape(_A , -1 , self.clip_extra_context_tokens )
snake_case_ : int = clip_extra_context_tokens.permute(0 , 2 , 1 )
snake_case_ : Optional[int] = self.encoder_hidden_states_proj(_A )
snake_case_ : Any = self.text_encoder_hidden_states_norm(_A )
snake_case_ : Dict = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 88
| 0
|
'''simple docstring'''
from __future__ import annotations
from random import random
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase = None ) -> str:
lowercase__ : Any = value
lowercase__ : Union[str, Any] = random()
lowercase__ : Node | None = None
lowercase__ : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
lowercase__ : Any = str(self.value ) + ''' '''
lowercase__ : str = str(self.left or '''''' )
lowercase__ : List[str] = str(self.right or '''''' )
return value + left + right
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowercase__ , lowercase__ : int = split(root.left , UpperCAmelCase )
return left, root
else:
lowercase__ , lowercase__ : Optional[Any] = split(root.right , UpperCAmelCase )
return root, right
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowercase__ : Any = merge(left.right , UpperCAmelCase )
return left
else:
lowercase__ : Dict = merge(UpperCAmelCase , right.left )
return right
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Dict = Node(UpperCAmelCase )
lowercase__ , lowercase__ : Tuple = split(UpperCAmelCase , UpperCAmelCase )
return merge(merge(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ , lowercase__ : Dict = split(UpperCAmelCase , value - 1 )
lowercase__ , lowercase__ : int = split(UpperCAmelCase , UpperCAmelCase )
return merge(UpperCAmelCase , UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
for arg in args.split():
if arg[0] == "+":
lowercase__ : Tuple = insert(UpperCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
lowercase__ : Union[str, Any] = erase(UpperCAmelCase , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def __UpperCamelCase ( ):
lowercase__ : Tuple = None
print(
'''enter numbers to create a tree, + value to add value into treap, '''
'''- value to erase all nodes with value. \'q\' to quit. ''' )
lowercase__ : Dict = input()
while args != "q":
lowercase__ : Any = interact_treap(UpperCAmelCase , UpperCAmelCase )
print(UpperCAmelCase )
lowercase__ : List[Any] = input()
print('''good by!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 198
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def __UpperCamelCase ( UpperCAmelCase ):
return input_array.reshape((input_array.size, 1) )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Dict = np.nan
for i in range(UpperCAmelCase ):
lowercase__ : Optional[Any] = features[:, labels == i]
lowercase__ : Optional[Any] = data.mean(1 )
# Centralize the data of class i
lowercase__ : Dict = data - column_reshape(UpperCAmelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase__ : List[str] = np.dot(UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Tuple = features.mean(1 )
lowercase__ : Dict = np.nan
for i in range(UpperCAmelCase ):
lowercase__ : List[str] = features[:, labels == i]
lowercase__ : int = data.shape[1]
lowercase__ : Optional[int] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase__ : Optional[int] = device_data * np.dot(
column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
# Check if the features have been loaded
if features.any():
lowercase__ : Optional[Any] = features.mean(1 )
# Center the dataset
lowercase__ : List[str] = features - np.reshape(UpperCAmelCase , (data_mean.size, 1) )
lowercase__ : Optional[Any] = np.dot(UpperCAmelCase , centered_data.T ) / features.shape[1]
lowercase__ , lowercase__ : Tuple = np.linalg.eigh(UpperCAmelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
lowercase__ : str = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowercase__ : Tuple = np.dot(filtered_eigenvectors.T , UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
assert classes > dimensions
# Check if features have been already loaded
if features.any:
lowercase__ , lowercase__ : Any = eigh(
covariance_between_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , covariance_within_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
lowercase__ : Optional[int] = eigenvectors[:, ::-1][:, :dimensions]
lowercase__ , lowercase__ , lowercase__ : Optional[int] = np.linalg.svd(UpperCAmelCase )
lowercase__ : List[str] = svd_matrix[:, 0:dimensions]
lowercase__ : str = np.dot(filtered_svd_matrix.T , UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __UpperCamelCase ( ):
# Create dummy dataset with 2 classes and 3 features
lowercase__ : List[str] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowercase__ : Optional[Any] = np.array([0, 0, 0, 1, 1] )
lowercase__ : str = 2
lowercase__ : Dict = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCAmelCase ) as error_info:
lowercase__ : int = linear_discriminant_analysis(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if isinstance(UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __UpperCamelCase ( ):
lowercase__ : Optional[int] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowercase__ : int = 2
lowercase__ : Any = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCAmelCase ) as error_info:
lowercase__ : Dict = principal_component_analysis(UpperCAmelCase , UpperCAmelCase )
if not np.allclose(UpperCAmelCase , UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 198
| 1
|
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> list[int]:
return [ord(lowerCamelCase_ ) - 96 for elem in plain]
def UpperCamelCase_( lowerCamelCase_ ) -> str:
return "".join(chr(elem + 96 ) for elem in encoded )
def UpperCamelCase_( ) -> None:
_lowercase : Dict = encode(input('-> ' ).strip().lower() )
print('Encoded: ' , lowerCamelCase_ )
print('Decoded:' , decode(lowerCamelCase_ ) )
if __name__ == "__main__":
main()
| 84
|
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Union[str, Any] = len(lowerCamelCase_ ) // 2
# choose the middle 3 elements
_lowercase : Any = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84
| 1
|
'''simple docstring'''
from collections import defaultdict
def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
lowerCamelCase_ = first_str.lower().strip()
lowerCamelCase_ = second_str.lower().strip()
# Remove whitespace
lowerCamelCase_ = first_str.replace(" " , "" )
lowerCamelCase_ = second_str.replace(" " , "" )
# Strings of different lengths are not anagrams
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
return False
# Default values for count should be 0
lowerCamelCase_ = defaultdict(UpperCAmelCase_ )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(UpperCAmelCase_ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
a_ : str = input("""Enter the first string """).strip()
a_ : Dict = input("""Enter the second string """).strip()
a_ : Dict = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 55
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
snake_case_ : List[Any] = None
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : Dict = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
snake_case_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
snake_case_ : List[str] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
snake_case_ : List[str] = '▁'
class lowercase__ ( lowercase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = AlbertTokenizer
def __init__( self : Tuple ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Any=False ,lowerCamelCase__ : Optional[int]="[CLS]" ,lowerCamelCase__ : Union[str, Any]="[SEP]" ,lowerCamelCase__ : Optional[int]="<unk>" ,lowerCamelCase__ : str="[SEP]" ,lowerCamelCase__ : List[Any]="<pad>" ,lowerCamelCase__ : Dict="[CLS]" ,lowerCamelCase__ : int="[MASK]" ,**lowerCamelCase__ : Any ,):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_UpperCamelCase : Dict = (
AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ,normalized=lowerCamelCase__ )
if isinstance(lowerCamelCase__ ,lowerCamelCase__ )
else mask_token
)
super().__init__(
lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,remove_space=lowerCamelCase__ ,keep_accents=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,**lowerCamelCase__ ,)
_UpperCamelCase : Tuple = do_lower_case
_UpperCamelCase : str = remove_space
_UpperCamelCase : Optional[Any] = keep_accents
_UpperCamelCase : Dict = vocab_file
_UpperCamelCase : Dict = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
_UpperCamelCase : List[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
_UpperCamelCase : int = [self.sep_token_id]
_UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCamelCase : Dict = os.path.join(
lowerCamelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file ,lowerCamelCase__ )
return (out_vocab_file,)
| 83
| 0
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger(__name__)
A : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
A : Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : Dict ) -> Any:
"""simple docstring"""
for attribute in key.split(""".""" ):
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
lowercase__ = getattr(__magic_name__ , __magic_name__ ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowercase__ = None
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == """group""" , )
lowercase__ = True
elif name.split(""".""" )[0] == "proj":
lowercase__ = fairseq_model.proj
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(__magic_name__ )[0].split(""".""" )[-2]
lowercase__ = mapped_key.replace("""*""" , __magic_name__ )
if "weight_g" in name:
lowercase__ = """weight_g"""
elif "weight_v" in name:
lowercase__ = """weight_v"""
elif "bias" in name:
lowercase__ = """bias"""
elif "weight" in name:
lowercase__ = """weight"""
else:
lowercase__ = None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Dict ) -> str:
"""simple docstring"""
lowercase__ = full_name.split("""conv_layers.""" )[-1]
lowercase__ = name.split(""".""" )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
lowercase__ = emb.weight.data
return lin_layer
def UpperCamelCase ( __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
with open(__magic_name__ , """r""" , encoding="""utf-8""" ) as f:
lowercase__ = f.readlines()
lowercase__ = [line.split(""" """ )[0] for line in lines]
lowercase__ = len(__magic_name__ )
lowercase__ = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(__magic_name__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = WavaVecaConfig.from_pretrained(__magic_name__ )
lowercase__ = SpeechaTextaConfig.from_pretrained(
__magic_name__ , vocab_size=__magic_name__ , decoder_layers=__magic_name__ , do_stable_layer_norm=__magic_name__ )
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , )
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
lowercase__ = model[0].eval()
# set weights for wav2vec2 encoder
lowercase__ = WavaVecaModel(__magic_name__ )
lowercase__ = recursively_load_weights_wavaveca(model.encoder , __magic_name__ )
lowercase__ = SpeechaTextaForCausalLM(__magic_name__ )
lowercase__ , lowercase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__magic_name__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
lowercase__ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowercase__ = SpeechEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
lowercase__ = False
# add projection layer
lowercase__ = nn.Parameter(projection_layer.weight )
lowercase__ = nn.Parameter(projection_layer.bias )
lowercase__ = create_vocab_dict(__magic_name__ )
with open(os.path.join(__magic_name__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(__magic_name__ , __magic_name__ )
lowercase__ = SpeechaTextaTokenizer(os.path.join(__magic_name__ , """vocab.json""" ) )
tokenizer.save_pretrained(__magic_name__ )
lowercase__ = hf_wavavec.config.to_dict()
lowercase__ = tokenizer.pad_token_id
lowercase__ = tokenizer.bos_token_id
lowercase__ = tokenizer.eos_token_id
lowercase__ = """speech_to_text_2"""
lowercase__ = """wav2vec2"""
lowercase__ = SpeechEncoderDecoderConfig.from_dict(__magic_name__ )
hf_wavavec.save_pretrained(__magic_name__ )
feature_extractor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
A : Union[str, Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 146
|
from __future__ import annotations
from collections import deque
class A :
'''simple docstring'''
def __init__(self : Any , _UpperCAmelCase : list[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(_UpperCAmelCase )
self.set_fail_transitions()
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : str ) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
lowercase__ = 0
for character in keyword:
lowercase__ = self.find_next_state(_UpperCAmelCase , _UpperCAmelCase )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
lowercase__ = len(self.adlist ) - 1
else:
lowercase__ = next_state
self.adlist[current_state]["output"].append(_UpperCAmelCase )
def lowerCamelCase__ (self : Any ) -> None:
"""simple docstring"""
lowercase__ = deque()
for node in self.adlist[0]["next_states"]:
q.append(_UpperCAmelCase )
lowercase__ = 0
while q:
lowercase__ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(_UpperCAmelCase )
lowercase__ = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(_UpperCAmelCase , self.adlist[child]["""value"""] ) is None
and state != 0
):
lowercase__ = self.adlist[state]["""fail_state"""]
lowercase__ = self.find_next_state(
_UpperCAmelCase , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
lowercase__ = 0
lowercase__ = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : str ) -> dict[str, list[int]]:
"""simple docstring"""
lowercase__ = {} # returns a dict with keywords and list of its occurrences
lowercase__ = 0
for i in range(len(_UpperCAmelCase ) ):
while (
self.find_next_state(_UpperCAmelCase , string[i] ) is None
and current_state != 0
):
lowercase__ = self.adlist[current_state]["""fail_state"""]
lowercase__ = self.find_next_state(_UpperCAmelCase , string[i] )
if next_state is None:
lowercase__ = 0
else:
lowercase__ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
lowercase__ = []
result[key].append(i - len(_UpperCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 146
| 1
|
from __future__ import annotations
import os
from collections.abc import Mapping
a_ = tuple[int, int]
class _lowercase :
def __init__( self : Optional[Any] , snake_case : set[int] , snake_case : Mapping[EdgeT, int] ) -> int:
"""simple docstring"""
UpperCamelCase_ : List[Any] = vertices
UpperCamelCase_ : Union[str, Any] = {
(min(_lowerCamelCase ), max(_lowerCamelCase )): weight for edge, weight in edges.items()
}
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : EdgeT , snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
UpperCamelCase_ : List[str] = weight
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = Graph({min(self.vertices )} , {} )
UpperCamelCase_ : int = 4_2
UpperCamelCase_ : Any = 4_2
UpperCamelCase_ : Tuple = 4_2
UpperCamelCase_ : Optional[int] = 4_2
while len(subgraph.vertices ) < len(self.vertices ):
UpperCamelCase_ : List[str] = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
UpperCamelCase_ : List[str] = edge
UpperCamelCase_ : Tuple = weight
subgraph.add_edge(_lowerCamelCase , _lowerCamelCase )
return subgraph
def __lowercase ( lowerCamelCase : Union[str, Any] = "p107_network.txt" ):
UpperCamelCase_ : Optional[int] = os.path.abspath(os.path.dirname(__UpperCamelCase ) )
UpperCamelCase_ : Optional[Any] = os.path.join(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ : str = {}
UpperCamelCase_ : Tuple = 42
UpperCamelCase_ : Optional[Any] = 42
UpperCamelCase_ : Tuple = 42
with open(__UpperCamelCase ) as f:
UpperCamelCase_ : str = f.read().strip().split('\n' )
UpperCamelCase_ : List[str] = [line.split(',' ) for line in data]
for edgea in range(1 , len(__UpperCamelCase ) ):
for edgea in range(__UpperCamelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
UpperCamelCase_ : List[Any] = int(adjaceny_matrix[edgea][edgea] )
UpperCamelCase_ : int = Graph(set(range(len(__UpperCamelCase ) ) ) , __UpperCamelCase )
UpperCamelCase_ : Union[str, Any] = graph.prims_algorithm()
UpperCamelCase_ : List[str] = sum(graph.edges.values() )
UpperCamelCase_ : Optional[Any] = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 175
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
A_ : int = ["input_features", "attention_mask"]
def __init__( self : Optional[Any], _lowerCamelCase : Union[str, Any]=80, _lowerCamelCase : int=1_60_00, _lowerCamelCase : Any=80, _lowerCamelCase : List[str]=0.0, _lowerCamelCase : int=True, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Optional[int]=True, **_lowerCamelCase : List[str], ):
'''simple docstring'''
super().__init__(feature_size=_lowerCamelCase, sampling_rate=_lowerCamelCase, padding_value=_lowerCamelCase, **_lowerCamelCase )
__A = num_mel_bins
__A = do_ceptral_normalize
__A = normalize_means
__A = normalize_vars
__A = True
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : np.ndarray, ):
'''simple docstring'''
__A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
__A = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
__A = ta_kaldi.fbank(_lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.ndarray, _lowerCamelCase : int, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : Optional[bool] = True, _lowerCamelCase : float = 0.0, ):
'''simple docstring'''
# make sure we normalize float32 arrays
if normalize_means:
__A = x[:input_length].mean(axis=0 )
__A = np.subtract(_lowerCamelCase, _lowerCamelCase )
if normalize_vars:
__A = x[:input_length].std(axis=0 )
__A = np.divide(_lowerCamelCase, _lowerCamelCase )
if input_length < x.shape[0]:
__A = padding_value
# make sure array is in float32
__A = x.astype(np.floataa )
return x
def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[np.ndarray], _lowerCamelCase : Optional[np.ndarray] = None ):
'''simple docstring'''
__A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(_lowerCamelCase, _lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value )
for x, n in zip(_lowerCamelCase, _lowerCamelCase )
]
def __call__( self : Optional[Any], _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], _lowerCamelCase : Union[bool, str, PaddingStrategy] = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : bool = False, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[Union[str, TensorType]] = None, _lowerCamelCase : Optional[int] = None, _lowerCamelCase : Optional[bool] = None, **_lowerCamelCase : Optional[Any], ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__A = isinstance(_lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__A = is_batched_numpy or (
isinstance(_lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
__A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowerCamelCase, np.ndarray ):
__A = np.asarray(_lowerCamelCase, dtype=np.floataa )
elif isinstance(_lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__A = [raw_speech]
# extract fbank features
__A = [self._extract_fbank_features(_lowerCamelCase ) for waveform in raw_speech]
# convert into correct format for padding
__A = BatchFeature({'''input_features''': features} )
__A = self.pad(
_lowerCamelCase, padding=_lowerCamelCase, max_length=_lowerCamelCase, truncation=_lowerCamelCase, pad_to_multiple_of=_lowerCamelCase, return_attention_mask=_lowerCamelCase, **_lowerCamelCase, )
# make sure list is in array format
__A = padded_inputs.get('''input_features''' )
if isinstance(input_features[0], _lowerCamelCase ):
__A = [np.asarray(_lowerCamelCase, dtype=np.floataa ) for feature in input_features]
__A = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__A = [np.asarray(_lowerCamelCase, dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__A = (
np.array(_lowerCamelCase, dtype=np.intaa )
if self._get_padding_strategies(_lowerCamelCase, max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__A = self.normalize(
padded_inputs['''input_features'''], attention_mask=_lowerCamelCase )
if return_tensors is not None:
__A = padded_inputs.convert_to_tensors(_lowerCamelCase )
return padded_inputs
| 266
| 0
|
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> float:
_validate_point(__lowercase )
_validate_point(__lowercase )
if len(__lowercase ) != len(__lowercase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(__lowercase , __lowercase ) ) )
def a__ ( __lowercase ) -> None:
if point:
if isinstance(__lowercase , __lowercase ):
for item in point:
if not isinstance(__lowercase , (int, float) ):
_A = (
"Expected a list of numbers as input, found "
f"""{type(__lowercase ).__name__}"""
)
raise TypeError(__lowercase )
else:
_A = f"""Expected a list of numbers as input, found {type(__lowercase ).__name__}"""
raise TypeError(__lowercase )
else:
raise ValueError("Missing an input" )
def a__ ( __lowercase , __lowercase ) -> float:
_validate_point(__lowercase )
_validate_point(__lowercase )
if len(__lowercase ) != len(__lowercase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(__lowercase , __lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 163
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
a_ = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = SavedModel()
_A = []
with open(os.path.join(__lowercase , "utils" , "tf_ops" , "onnx.json" ) ) as f:
_A = json.load(__lowercase )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__lowercase )] )
with open(__lowercase , "rb" ) as f:
saved_model.ParseFromString(f.read() )
_A = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
_A = sorted(__lowercase )
_A = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__lowercase )
if strict and len(__lowercase ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(__lowercase ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*__lowercase , sep="\n" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
a_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 163
| 1
|
from __future__ import annotations
a_ :Tuple = [True] * 1_000_001
a_ :Union[str, Any] = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
a_ :str = False
i += 1
def lowercase_ (A : int ):
return seive[n]
def lowercase_ (A : int ):
return any(digit in '02468' for digit in str(A ) )
def lowercase_ (A : int = 1_0_0_0_0_0_0 ):
snake_case__ : Optional[Any] = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(A ) and not contains_an_even_digit(A ):
snake_case__ : List[Any] = str(A )
snake_case__ : Dict = [int(str_num[j:] + str_num[:j] ) for j in range(len(A ) )]
if all(is_prime(A ) for i in list_nums ):
result.append(A )
return result
def lowercase_ ():
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""")
| 277
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ :str = logging.get_logger(__name__)
def lowercase_ (A : str ):
snake_case__ : Tuple = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
snake_case__ : List[Any] = MaskFormerConfig(backbone_config=A )
snake_case__ : Union[str, Any] = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
snake_case__ : Dict = 8_4_7
snake_case__ : List[str] = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
snake_case__ : Union[str, Any] = 1_5_0
snake_case__ : Any = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
snake_case__ : List[str] = 1_7_1
snake_case__ : Union[str, Any] = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
snake_case__ : Dict = 1_3_3
snake_case__ : str = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
snake_case__ : List[str] = 1_9
snake_case__ : Union[str, Any] = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
snake_case__ : Tuple = 6_5
snake_case__ : List[str] = 'mapillary-vistas-id2label.json'
snake_case__ : Dict = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
snake_case__ : List[str] = {int(A ): v for k, v in idalabel.items()}
return config
def lowercase_ (A : Any ):
snake_case__ : Optional[int] = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def lowercase_ (A : Tuple , A : Tuple , A : Optional[Any] ):
snake_case__ : Optional[int] = dct.pop(A )
snake_case__ : Union[str, Any] = val
def lowercase_ (A : Optional[Any] , A : Tuple ):
snake_case__ : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
snake_case__ : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
snake_case__ : Tuple = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : str = in_proj_weight[:dim, :]
snake_case__ : int = in_proj_bias[: dim]
snake_case__ : List[Any] = in_proj_weight[
dim : dim * 2, :
]
snake_case__ : List[str] = in_proj_bias[
dim : dim * 2
]
snake_case__ : List[Any] = in_proj_weight[
-dim :, :
]
snake_case__ : Dict = in_proj_bias[-dim :]
# fmt: on
def lowercase_ (A : List[str] , A : List[Any] ):
# fmt: off
snake_case__ : str = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
snake_case__ : List[Any] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
snake_case__ : int = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Any = in_proj_weight[: hidden_size, :]
snake_case__ : Tuple = in_proj_bias[:config.hidden_size]
snake_case__ : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
snake_case__ : Dict = in_proj_bias[hidden_size : hidden_size * 2]
snake_case__ : Any = in_proj_weight[-hidden_size :, :]
snake_case__ : int = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
snake_case__ : List[Any] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
snake_case__ : List[str] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[: hidden_size, :]
snake_case__ : Optional[Any] = in_proj_bias[:config.hidden_size]
snake_case__ : int = in_proj_weight[hidden_size : hidden_size * 2, :]
snake_case__ : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
snake_case__ : List[str] = in_proj_weight[-hidden_size :, :]
snake_case__ : str = in_proj_bias[-hidden_size :]
# fmt: on
def lowercase_ ():
snake_case__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case__ : int = Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def lowercase_ (A : str , A : str , A : str , A : bool = False ):
snake_case__ : Optional[int] = get_maskformer_config(A )
# load original state_dict
with open(A , 'rb' ) as f:
snake_case__ : List[Any] = pickle.load(A )
snake_case__ : Optional[int] = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
snake_case__ : List[str] = create_rename_keys(A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_swin_q_k_v(A , config.backbone_config )
read_in_decoder_q_k_v(A , A )
# update to torch tensors
for key, value in state_dict.items():
snake_case__ : int = torch.from_numpy(A )
# load 🤗 model
snake_case__ : str = MaskFormerForInstanceSegmentation(A )
model.eval()
for name, param in model.named_parameters():
print(A , param.shape )
snake_case__ , snake_case__ : Union[str, Any] = model.load_state_dict(A , strict=A )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(A ) == 0, F'''Unexpected keys: {unexpected_keys}'''
# verify results
snake_case__ : Optional[Any] = prepare_img()
if "vistas" in model_name:
snake_case__ : int = 6_5
elif "cityscapes" in model_name:
snake_case__ : Dict = 6_5_5_3_5
else:
snake_case__ : Tuple = 2_5_5
snake_case__ : Optional[int] = True if 'ade' in model_name else False
snake_case__ : Dict = MaskFormerImageProcessor(ignore_index=A , reduce_labels=A )
snake_case__ : Any = image_processor(A , return_tensors='pt' )
snake_case__ : Any = model(**A )
print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
snake_case__ : Tuple = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , A , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(F'''nielsr/{model_name}''' )
image_processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a_ :Dict = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 277
| 1
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __lowerCamelCase ( __a :Tuple , __a :Optional[Any] ) -> List[str]:
"""simple docstring"""
A__ = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
A__ = Image.open(requests.get(__a , stream=__a ).raw ).convert("""RGB""" )
A__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
] )
A__ = transform(__a ).unsqueeze(0 ).to(__a )
return image
def __lowerCamelCase ( __a :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if "visual_encoder" in key:
A__ = re.sub("""visual_encoder*""" , """vision_model.encoder""" , __a )
if "blocks" in key:
A__ = re.sub(R"""blocks""" , """layers""" , __a )
if "attn" in key:
A__ = re.sub(R"""attn""" , """self_attn""" , __a )
if "norm1" in key:
A__ = re.sub(R"""norm1""" , """layer_norm1""" , __a )
if "norm2" in key:
A__ = re.sub(R"""norm2""" , """layer_norm2""" , __a )
if "encoder.norm" in key:
A__ = re.sub(R"""encoder.norm""" , """post_layernorm""" , __a )
if "encoder.patch_embed.proj" in key:
A__ = re.sub(R"""encoder.patch_embed.proj""" , """embeddings.patch_embedding""" , __a )
if "encoder.pos_embed" in key:
A__ = re.sub(R"""encoder.pos_embed""" , """embeddings.position_embedding""" , __a )
if "encoder.cls_token" in key:
A__ = re.sub(R"""encoder.cls_token""" , """embeddings.class_embedding""" , __a )
if "self_attn" in key:
A__ = re.sub(R"""self_attn.proj""" , """self_attn.projection""" , __a )
return key
@torch.no_grad()
def __lowerCamelCase ( __a :Optional[int] , __a :Dict=None ) -> Union[str, Any]:
"""simple docstring"""
if config_path is not None:
A__ = BlipConfig.from_pretrained(__a )
else:
A__ = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
A__ = BlipForConditionalGeneration(__a ).eval()
A__ = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
A__ = blip_decoder(pretrained=__a , image_size=3_8_4 , vit="""base""" )
A__ = pt_model.eval()
A__ = pt_model.state_dict()
for key in modified_state_dict.copy():
A__ = modified_state_dict.pop(__a )
A__ = rename_key(__a )
A__ = value
hf_model.load_state_dict(__a )
A__ = 3_8_4
A__ = load_demo_image(image_size=__a , device="""cpu""" )
A__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
A__ = tokenizer(["""a picture of"""] ).input_ids
A__ = hf_model.generate(__a , __a )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
A__ = hf_model.generate(__a )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__a )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
A__ = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
A__ = blip_vqa(pretrained=__a , image_size=__a , vit="""base""" )
vqa_model.eval()
A__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
A__ = modified_state_dict.pop(__a )
A__ = rename_key(__a )
A__ = value
A__ = BlipForQuestionAnswering(__a )
hf_vqa_model.load_state_dict(__a )
A__ = ["""How many dogs are in this image?"""]
A__ = tokenizer(__a , return_tensors="""pt""" ).input_ids
A__ = hf_vqa_model.generate(__a , __a )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
A__ = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
A__ = blip_itm(pretrained=__a , image_size=__a , vit="""base""" )
itm_model.eval()
A__ = itm_model.state_dict()
for key in modified_state_dict.copy():
A__ = modified_state_dict.pop(__a )
A__ = rename_key(__a )
A__ = value
A__ = BlipForImageTextRetrieval(__a )
A__ = ["""A picture of a woman with a dog sitting in a beach"""]
A__ = tokenizer(
__a , return_tensors="""pt""" , padding="""max_length""" , truncation=__a , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(__a )
hf_itm_model.eval()
A__ = hf_itm_model(__a , __a , use_itm_head=__a )
A__ = hf_itm_model(__a , __a , use_itm_head=__a )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
A : Union[str, Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 276
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __lowerCamelCase ( __a :int ) -> int:
"""simple docstring"""
A__ = prime_factors(__a )
if is_square_free(__a ):
return -1 if len(__a ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : Optional[int] = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 112
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'}
SCREAMING_SNAKE_CASE :Tuple = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
SCREAMING_SNAKE_CASE :List[Any] = {
'camembert-base': 512,
}
SCREAMING_SNAKE_CASE :List[str] = '▁'
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] ,A : List[str] ,A : List[Any]="<s>" ,A : Tuple="</s>" ,A : Any="</s>" ,A : Optional[Any]="<s>" ,A : Tuple="<unk>" ,A : str="<pad>" ,A : int="<mask>" ,A : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] ,A : Optional[Dict[str, Any]] = None ,**A : Optional[Any] ,):
# Mask token behave like a normal word, i.e. include the space before it
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,additional_special_tokens=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
__A = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
__A = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
__A = len(self.fairseq_tokens_to_ids )
__A = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self : int ,A : List[int] ,A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A = [self.cls_token_id]
__A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ):
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self : Dict ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase_ ( self : int ):
__A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : Any ,A : str ):
return self.sp_model.encode(A ,out_type=A )
def UpperCamelCase_ ( self : List[str] ,A : Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(A ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(A )
def UpperCamelCase_ ( self : Dict ,A : Tuple ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self : Optional[Any] ,A : Dict ):
__A = []
__A = ""
__A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
__A = True
__A = []
else:
current_sub_tokens.append(A )
__A = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def __getstate__( self : Dict ):
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Union[str, Any] ,A : Any ):
__A = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__A = os.path.join(
A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A )
elif not os.path.isfile(self.vocab_file ):
with open(A ,"wb" ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 15
| 0
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__lowerCamelCase : str = logging.get_logger(__name__)
# General docstring
__lowerCamelCase : Union[str, Any] = 'ResNetConfig'
# Base docstring
__lowerCamelCase : Tuple = 'microsoft/resnet-50'
__lowerCamelCase : str = [1, 2048, 7, 7]
# Image classification docstring
__lowerCamelCase : Any = 'microsoft/resnet-50'
__lowerCamelCase : Tuple = 'tiger cat'
__lowerCamelCase : str = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int , __A : int , __A : int , __A : int = 3 , __A : int = 1 , __A : str = "relu" ):
super().__init__()
snake_case__ : List[Any] = nn.Convad(
_lowercase , _lowercase , kernel_size=_lowercase , stride=_lowercase , padding=kernel_size // 2 , bias=_lowercase )
snake_case__ : Dict = nn.BatchNormad(_lowercase )
snake_case__ : Dict = ACTaFN[activation] if activation is not None else nn.Identity()
def _lowercase ( self : str , __A : Tensor ):
snake_case__ : Optional[int] = self.convolution(_lowercase )
snake_case__ : str = self.normalization(_lowercase )
snake_case__ : Optional[Any] = self.activation(_lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __A : ResNetConfig ):
super().__init__()
snake_case__ : str = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
snake_case__ : List[Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
snake_case__ : str = config.num_channels
def _lowercase ( self : Optional[int] , __A : Tensor ):
snake_case__ : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
snake_case__ : List[str] = self.embedder(_lowercase )
snake_case__ : Dict = self.pooler(_lowercase )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , __A : int , __A : int , __A : int = 2 ):
super().__init__()
snake_case__ : List[str] = nn.Convad(_lowercase , _lowercase , kernel_size=1 , stride=_lowercase , bias=_lowercase )
snake_case__ : Dict = nn.BatchNormad(_lowercase )
def _lowercase ( self : Union[str, Any] , __A : Tensor ):
snake_case__ : Optional[int] = self.convolution(_lowercase )
snake_case__ : Dict = self.normalization(_lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , __A : int , __A : int , __A : int = 1 , __A : str = "relu" ):
super().__init__()
snake_case__ : str = in_channels != out_channels or stride != 1
snake_case__ : List[str] = (
ResNetShortCut(_lowercase , _lowercase , stride=_lowercase ) if should_apply_shortcut else nn.Identity()
)
snake_case__ : Dict = nn.Sequential(
ResNetConvLayer(_lowercase , _lowercase , stride=_lowercase ) , ResNetConvLayer(_lowercase , _lowercase , activation=_lowercase ) , )
snake_case__ : Tuple = ACTaFN[activation]
def _lowercase ( self : str , __A : Optional[int] ):
snake_case__ : List[Any] = hidden_state
snake_case__ : int = self.layer(_lowercase )
snake_case__ : Optional[int] = self.shortcut(_lowercase )
hidden_state += residual
snake_case__ : Any = self.activation(_lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self : str , __A : int , __A : int , __A : int = 1 , __A : str = "relu" , __A : int = 4 ):
super().__init__()
snake_case__ : Any = in_channels != out_channels or stride != 1
snake_case__ : Tuple = out_channels // reduction
snake_case__ : Optional[Any] = (
ResNetShortCut(_lowercase , _lowercase , stride=_lowercase ) if should_apply_shortcut else nn.Identity()
)
snake_case__ : List[Any] = nn.Sequential(
ResNetConvLayer(_lowercase , _lowercase , kernel_size=1 ) , ResNetConvLayer(_lowercase , _lowercase , stride=_lowercase ) , ResNetConvLayer(_lowercase , _lowercase , kernel_size=1 , activation=_lowercase ) , )
snake_case__ : Optional[Any] = ACTaFN[activation]
def _lowercase ( self : Dict , __A : Union[str, Any] ):
snake_case__ : Dict = hidden_state
snake_case__ : List[str] = self.layer(_lowercase )
snake_case__ : Dict = self.shortcut(_lowercase )
hidden_state += residual
snake_case__ : Optional[int] = self.activation(_lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : ResNetConfig , __A : int , __A : int , __A : int = 2 , __A : int = 2 , ):
super().__init__()
snake_case__ : str = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
snake_case__ : Tuple = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_lowercase , _lowercase , stride=_lowercase , activation=config.hidden_act ) , *[layer(_lowercase , _lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _lowercase ( self : List[str] , __A : Tensor ):
snake_case__ : Any = input
for layer in self.layers:
snake_case__ : int = layer(_lowercase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , __A : ResNetConfig ):
super().__init__()
snake_case__ : Dict = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
snake_case__ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(_lowercase , _lowercase , _lowercase , depth=_lowercase ) )
def _lowercase ( self : Optional[Any] , __A : Tensor , __A : bool = False , __A : bool = True ):
snake_case__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
snake_case__ : List[Any] = hidden_states + (hidden_state,)
snake_case__ : str = stage_module(_lowercase )
if output_hidden_states:
snake_case__ : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_lowercase , hidden_states=_lowercase , )
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase ):
"""simple docstring"""
a_ = ResNetConfig
a_ = "resnet"
a_ = "pixel_values"
a_ = True
def _lowercase ( self : Any , __A : Optional[int] ):
if isinstance(_lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(_lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _lowercase ( self : Optional[Any] , __A : Dict , __A : int=False ):
if isinstance(_lowercase , _lowercase ):
snake_case__ : List[Any] = value
__lowerCamelCase : List[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowerCamelCase : List[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." , _lowerCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self : str , __A : Dict ):
super().__init__(_lowercase )
snake_case__ : Any = config
snake_case__ : int = ResNetEmbeddings(_lowercase )
snake_case__ : Any = ResNetEncoder(_lowercase )
snake_case__ : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase ( self : str , __A : Tensor , __A : Optional[bool] = None , __A : Optional[bool] = None ):
snake_case__ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : int = self.embedder(_lowercase )
snake_case__ : Union[str, Any] = self.encoder(
_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
snake_case__ : List[Any] = encoder_outputs[0]
snake_case__ : Optional[int] = self.pooler(_lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowercase , pooler_output=_lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _lowerCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : Tuple ):
super().__init__(_lowercase )
snake_case__ : List[str] = config.num_labels
snake_case__ : Optional[int] = ResNetModel(_lowercase )
# classification head
snake_case__ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase ( self : int , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.LongTensor] = None , __A : Optional[bool] = None , __A : Optional[bool] = None , ):
snake_case__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : Optional[int] = self.resnet(_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
snake_case__ : Any = outputs.pooler_output if return_dict else outputs[1]
snake_case__ : Union[str, Any] = self.classifier(_lowercase )
snake_case__ : Union[str, Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case__ : Union[str, Any] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case__ : Tuple = "single_label_classification"
else:
snake_case__ : Dict = "multi_label_classification"
if self.config.problem_type == "regression":
snake_case__ : Optional[int] = MSELoss()
if self.num_labels == 1:
snake_case__ : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case__ : Optional[Any] = loss_fct(_lowercase , _lowercase )
elif self.config.problem_type == "single_label_classification":
snake_case__ : Optional[Any] = CrossEntropyLoss()
snake_case__ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case__ : int = BCEWithLogitsLoss()
snake_case__ : Dict = loss_fct(_lowercase , _lowercase )
if not return_dict:
snake_case__ : str = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_lowercase , logits=_lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , _lowerCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , __A : int ):
super().__init__(_lowercase )
super()._init_backbone(_lowercase )
snake_case__ : Dict = [config.embedding_size] + config.hidden_sizes
snake_case__ : Optional[Any] = ResNetEmbeddings(_lowercase )
snake_case__ : Any = ResNetEncoder(_lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@replace_return_docstrings(output_type=_lowercase , config_class=_CONFIG_FOR_DOC )
def _lowercase ( self : int , __A : Tensor , __A : Optional[bool] = None , __A : Optional[bool] = None ):
snake_case__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : int = self.embedder(_lowercase )
snake_case__ : Dict = self.encoder(_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
snake_case__ : Dict = outputs.hidden_states
snake_case__ : List[str] = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
snake_case__ : Dict = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_lowercase , )
| 370
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] ):
snake_case__ : Optional[Any] = SMALL_MODEL_IDENTIFIER
snake_case__ : Any = "pt"
snake_case__ : Any = "tf"
def _lowercase ( self : Union[str, Any] , __A : List[Any] ):
snake_case__ : int = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__A )
def _lowercase ( self : Optional[int] , __A : Tuple ):
snake_case__ : List[Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__A )
model_tf.save_pretrained(__A )
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = "mock_framework"
# Framework provided - return whatever the user provides
snake_case__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model , __A )
self.assertEqual(__A , __A )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__A )
snake_case__ : Optional[int] = FeaturesManager.determine_framework(__A , __A )
self.assertEqual(__A , __A )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__A )
snake_case__ : int = FeaturesManager.determine_framework(__A , __A )
self.assertEqual(__A , __A )
def _lowercase ( self : Dict ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__A )
snake_case__ : List[str] = FeaturesManager.determine_framework(__A )
self.assertEqual(__A , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__A )
snake_case__ : Tuple = FeaturesManager.determine_framework(__A )
self.assertEqual(__A , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__A ):
snake_case__ : int = FeaturesManager.determine_framework(__A )
def _lowercase ( self : Dict ):
snake_case__ : Dict = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_tf_available" , __A ):
snake_case__ : List[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__A , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
snake_case__ : Tuple = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_torch_available" , __A ):
snake_case__ : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__A , self.framework_tf )
# Both in environment -> use PyTorch
snake_case__ : Dict = MagicMock(return_value=__A )
snake_case__ : Optional[int] = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_tf_available" , __A ), patch(
"transformers.onnx.features.is_torch_available" , __A ):
snake_case__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__A , self.framework_pt )
# Both not in environment -> raise error
snake_case__ : List[str] = MagicMock(return_value=__A )
snake_case__ : Optional[Any] = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_tf_available" , __A ), patch(
"transformers.onnx.features.is_torch_available" , __A ):
with self.assertRaises(__A ):
snake_case__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
| 286
| 0
|
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> bool:
"""simple docstring"""
UpperCamelCase :Union[str, Any] = [int(__magic_name__ ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
return len(__magic_name__ ) == 4 and all(0 <= int(__magic_name__ ) <= 254 for octet in octets )
if __name__ == "__main__":
UpperCAmelCase_ : int = input().strip()
UpperCAmelCase_ : Any = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 38
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__UpperCAmelCase : Any = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
__UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False) -> Union[str, Any]:
__snake_case , __snake_case: int = create_model(
"""HTSAT-tiny""" , """roberta""" , SCREAMING_SNAKE_CASE__ , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=SCREAMING_SNAKE_CASE__ , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def A__ ( SCREAMING_SNAKE_CASE__) -> Any:
__snake_case: Optional[Any] = {}
__snake_case: int = r""".*sequential.(\d+).*"""
__snake_case: List[str] = r""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case: Tuple = key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if re.match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
# replace sequential layers with list
__snake_case: Optional[int] = re.match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).group(1)
__snake_case: str = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(SCREAMING_SNAKE_CASE__)//3}.linear.''')
elif re.match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: Any = int(re.match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).group(1))
# Because in CLAP they use `nn.Sequential`...
__snake_case: Dict = 1 if projecton_layer == 0 else 2
__snake_case: Any = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''')
if "audio" and "qkv" in key:
# split qkv into query key and value
__snake_case: List[str] = value
__snake_case: Optional[Any] = mixed_qkv.size(0) // 3
__snake_case: Union[str, Any] = mixed_qkv[:qkv_dim]
__snake_case: Dict = mixed_qkv[qkv_dim : qkv_dim * 2]
__snake_case: int = mixed_qkv[qkv_dim * 2 :]
__snake_case: Optional[Any] = query_layer
__snake_case: str = key_layer
__snake_case: int = value_layer
else:
__snake_case: Dict = value
return model_state_dict
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False) -> Optional[Any]:
__snake_case , __snake_case: List[str] = init_clap(SCREAMING_SNAKE_CASE__ , enable_fusion=SCREAMING_SNAKE_CASE__)
clap_model.eval()
__snake_case: List[str] = clap_model.state_dict()
__snake_case: Optional[int] = rename_state_dict(SCREAMING_SNAKE_CASE__)
__snake_case: Any = ClapConfig()
__snake_case: Dict = enable_fusion
__snake_case: List[str] = ClapModel(SCREAMING_SNAKE_CASE__)
# ignore the spectrogram embedding layer
model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__)
model.save_pretrained(SCREAMING_SNAKE_CASE__)
transformers_config.save_pretrained(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
__UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
__UpperCAmelCase : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 111
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowercase : Any = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : tuple , _SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int]=False , ):
output_path.parent.mkdir(parents=lowerCamelCase__ , exist_ok=lowerCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCamelCase__ , lowerCamelCase__ , f=output_path.as_posix() , input_names=lowerCamelCase__ , output_names=lowerCamelCase__ , dynamic_axes=lowerCamelCase__ , do_constant_folding=lowerCamelCase__ , use_external_data_format=lowerCamelCase__ , enable_onnx_checker=lowerCamelCase__ , opset_version=lowerCamelCase__ , )
else:
export(
lowerCamelCase__ , lowerCamelCase__ , f=output_path.as_posix() , input_names=lowerCamelCase__ , output_names=lowerCamelCase__ , dynamic_axes=lowerCamelCase__ , do_constant_folding=lowerCamelCase__ , opset_version=lowerCamelCase__ , )
@torch.no_grad()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = False ):
__a : Tuple = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__a : Tuple = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__a : str = '''cpu'''
__a : Dict = Path(lowerCamelCase__ )
# VAE DECODER
__a : Optional[int] = AutoencoderKL.from_pretrained(model_path + '/vae' )
__a : Tuple = vae_decoder.config.latent_channels
# forward only through the decoder part
__a : Any = vae_decoder.decode
onnx_export(
lowerCamelCase__ , model_args=(
torch.randn(1 , lowerCamelCase__ , 25 , 25 ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=lowerCamelCase__ , )
del vae_decoder
if __name__ == "__main__":
__lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
__lowercase : Optional[int] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('SD: Done: ONNX')
| 354
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__lowercase : str = logging.get_logger(__name__)
# General docstring
__lowercase : List[str] = 'MobileNetV1Config'
# Base docstring
__lowercase : Tuple = 'google/mobilenet_v1_1.0_224'
__lowercase : List[Any] = [1, 10_24, 7, 7]
# Image classification docstring
__lowercase : int = 'google/mobilenet_v1_1.0_224'
__lowercase : Any = 'tabby, tabby cat'
__lowercase : Dict = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
__a : Dict = {}
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Optional[Any] = model.mobilenet_va
else:
__a : List[Any] = model
__a : Dict = 'MobilenetV1/Conv2d_0/'
__a : Dict = backbone.conv_stem.convolution.weight
__a : Optional[Any] = backbone.conv_stem.normalization.bias
__a : int = backbone.conv_stem.normalization.weight
__a : int = backbone.conv_stem.normalization.running_mean
__a : Tuple = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__a : int = i + 1
__a : Dict = i * 2
__a : Dict = backbone.layer[pt_index]
__a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
__a : Union[str, Any] = pointer.convolution.weight
__a : Optional[Any] = pointer.normalization.bias
__a : Union[str, Any] = pointer.normalization.weight
__a : List[Any] = pointer.normalization.running_mean
__a : Tuple = pointer.normalization.running_var
__a : List[str] = backbone.layer[pt_index + 1]
__a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
__a : Optional[int] = pointer.convolution.weight
__a : List[str] = pointer.normalization.bias
__a : Dict = pointer.normalization.weight
__a : Dict = pointer.normalization.running_mean
__a : Optional[int] = pointer.normalization.running_var
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
__a : Optional[int] = model.classifier.weight
__a : List[Any] = model.classifier.bias
return tf_to_pt_map
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
__a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
__a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[Any] = array
# Build TF to PyTorch weights loading map
__a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
__a : Union[str, Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
__a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
__a : Union[str, Any] = array.squeeze().transpose()
else:
__a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
__a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE )
logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
return model
def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ):
__a , __a : Any = features.shape[-2:]
__a , __a : int = conv_layer.stride
__a , __a : Any = conv_layer.kernel_size
if in_height % stride_height == 0:
__a : int = max(kernel_height - stride_height , 0 )
else:
__a : int = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__a : Any = max(kernel_width - stride_width , 0 )
else:
__a : str = max(kernel_width - (in_width % stride_width) , 0 )
__a : int = pad_along_width // 2
__a : Dict = pad_along_width - pad_left
__a : List[str] = pad_along_height // 2
__a : Union[str, Any] = pad_along_height - pad_top
__a : str = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 )
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ):
'''simple docstring'''
super().__init__()
__a : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
__a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__a : Union[str, Any] = nn.Convad(
in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , )
if use_normalization:
__a : List[str] = nn.BatchNormad(
num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , )
else:
__a : Tuple = None
if use_activation:
if isinstance(__a , __a ):
__a : Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __a ):
__a : Union[str, Any] = ACTaFN[config.hidden_act]
else:
__a : Dict = config.hidden_act
else:
__a : List[Any] = None
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if self.config.tf_padding:
__a : Union[str, Any] = apply_tf_padding(__a , self.convolution )
__a : Union[str, Any] = self.convolution(__a )
if self.normalization is not None:
__a : str = self.normalization(__a )
if self.activation is not None:
__a : Optional[int] = self.activation(__a )
return features
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = MobileNetVaConfig
A_ = load_tf_weights_in_mobilenet_va
A_ = "mobilenet_v1"
A_ = "pixel_values"
A_ = False
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if isinstance(__a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a = True ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[int] = config
__a : str = 32
__a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth )
__a : Union[str, Any] = MobileNetVaConvLayer(
__a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , )
__a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__a : Any = nn.ModuleList()
for i in range(13 ):
__a : Union[str, Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=1 , ) )
__a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__a : Union[str, Any] = self.conv_stem(__a )
__a : Any = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__a : List[str] = layer_module(__a )
if output_hidden_states:
__a : List[Any] = all_hidden_states + (hidden_states,)
__a : str = hidden_states
if self.pooler is not None:
__a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 )
else:
__a : int = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__a , pooler_output=__a , hidden_states=__a , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Tuple = config.num_labels
__a : Tuple = MobileNetVaModel(__a )
__a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a )
__a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a )
__a : List[str] = outputs.pooler_output if return_dict else outputs[1]
__a : int = self.classifier(self.dropout(__a ) )
__a : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a : str = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a : int = 'single_label_classification'
else:
__a : Optional[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
__a : Optional[Any] = MSELoss()
if self.num_labels == 1:
__a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a : Any = loss_fct(__a , __a )
elif self.config.problem_type == "single_label_classification":
__a : List[str] = CrossEntropyLoss()
__a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a : Tuple = BCEWithLogitsLoss()
__a : Optional[int] = loss_fct(__a , __a )
if not return_dict:
__a : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
| 294
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.