'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
'''simple docstring'''
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # The original checkpoint stores weight-norm parametrized weights, so the
    # HF model must be parametrized the same way before copying them over.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy holds the mean and scale used to de-normalize input spectrograms
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
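
# Example invocation of the script above (script and file names here are
# placeholders, not real paths):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan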
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept a "|"-separated string for pos_att_type
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, num_choices=-1, is_pair=False,
                              framework=None, num_channels=3, image_width=40, image_height=40, tokenizer=None):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
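
# Usage sketch (hypothetical sizes, not an official checkpoint): pooler_hidden_size
# falls back to hidden_size via the kwargs.get(...) default above.
# >>> cfg = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
# >>> cfg.pooler_hidden_size
# 768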
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
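
# Note on input layout: `_compute` above transposes `references` from one
# sub-list per prediction into sacrebleu's one-stream-per-reference layout.
# Illustrative example (hypothetical strings):
#   references = [["ref A1", "ref A2"], ["ref B1", "ref B2"]]
#   becomes     [["ref A1", "ref B1"], ["ref A2", "ref B2"]]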
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
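
# Example (hypothetical factor): a rope_scaling value that passes the validation
# above -- a two-field dict with a recognized type and a float factor > 1:
#   OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})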
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
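
# Usage sketch (illustrative values): the attribute_map and properties above alias
# `hidden_size` -> `d_model` and `num_attention_heads` -> `encoder_attention_heads`:
#   cfg = DeformableDetrConfig(d_model=256, encoder_attention_heads=8)
#   cfg.hidden_size           # 256
#   cfg.num_attention_heads   # 8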
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """
    Solve for the quantity passed as 0; exactly one of the three arguments
    must be 0, and the other two are used to compute it.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
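
# Worked example (illustrative numbers): passing mobility=0 asks the function to
# solve for mobility, i.e. conductivity / (electron_conc * ELECTRON_CHARGE):
#   carrier_concentration(conductivity=25, electron_conc=100, mobility=0)
#   -> ("mobility", 25 / (100 * 1.6021e-19)) ~= ("mobility", 1.56e18)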
'''simple docstring'''
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function to every element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Build a gaussian kernel from each cell's distance to the kernel center.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
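
# Example invocation (hypothetical script and image names):
#   python bilateral_filter.py my_image.jpg 1.0 1.0 5
# argv[2] and argv[3] are the spatial and intensity variances; parse_args bumps
# an even kernel size to the next odd number so the kernel has a center pixel.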
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase : int = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
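# Usage note (illustrative; standard transformers lazy-module behaviour): importing
# from this package stays cheap until a torch-dependent symbol is first accessed,
# e.g. `from transformers.models.nezha import NezhaConfig` does not import torch,
# while accessing `NezhaModel` triggers the real `modeling_nezha` import.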
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , ):
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Tuple = 13
lowerCAmelCase : List[Any] = 7
lowerCAmelCase : str = True
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase : Tuple = False
lowerCAmelCase : int = True
lowerCAmelCase : str = 99
lowerCAmelCase : str = 32
lowerCAmelCase : Tuple = 2
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : Dict = 37
lowerCAmelCase : int = 'gelu'
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : str = 512
lowerCAmelCase : Optional[int] = 16
lowerCAmelCase : Tuple = 2
lowerCAmelCase : Tuple = 0.0_2
lowerCAmelCase : Tuple = 3
lowerCAmelCase : Optional[int] = 4
lowerCAmelCase : Union[str, Any] = None
def lowercase ( self ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_input_mask:
lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = None
if self.use_labels:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : List[str] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Any = TFDistilBertModel(config=snake_case__ )
lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : List[Any] = model(snake_case__ )
lowerCAmelCase : List[str] = [input_ids, input_mask]
lowerCAmelCase : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Tuple = TFDistilBertForMaskedLM(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[Any] = TFDistilBertForQuestionAnswering(config=snake_case__ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
lowerCAmelCase : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : List[str] = TFDistilBertForSequenceClassification(snake_case__ )
lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : Dict = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = self.num_choices
lowerCAmelCase : Optional[Any] = TFDistilBertForMultipleChoice(snake_case__ )
lowerCAmelCase : Dict = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : List[Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Any = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[Any] = self.num_labels
lowerCAmelCase : str = TFDistilBertForTokenClassification(snake_case__ )
lowerCAmelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : Union[str, Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_lowerCamelCase : Optional[int] = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : int = False
def lowercase ( self ):
lowerCAmelCase : str = TFDistilBertModelTester(self )
lowerCAmelCase : Any = ConfigTester(self , config_class=snake_case__ , dim=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*snake_case__ )
@slow
def lowercase ( self ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowerCAmelCase : Any = TFDistilBertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : Optional[int] = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
lowerCAmelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : Any = model(snake_case__ )[0]
lowerCAmelCase : List[str] = [1, 6, 768]
self.assertEqual(output.shape , snake_case__ )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-4 )
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None
    def __repr__(self):
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None
    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self):
        return sum(1 for _ in self)
    def __repr__(self):
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any):
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
        print(self)
    def delete_head(self):
        return self.delete_nth(0)
    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self):
        return self.head is None
    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercise the basic LinkedList operations."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise the LinkedList with a mix of data types."""
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo of the LinkedList operations."""
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=None , snake_case__=2 , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : str = image_size
lowerCAmelCase : str = patch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : List[Any] = is_training
lowerCAmelCase : Any = use_labels
lowerCAmelCase : List[str] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : str = type_sequence_label_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : int = scope
lowerCAmelCase : str = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase : List[str] = (image_size // patch_size) ** 2
lowerCAmelCase : str = num_patches + 2
def lowercase ( self ):
lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = self.get_config()
return config, pixel_values, labels
def lowercase ( self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Tuple = TFDeiTModel(config=snake_case__ )
lowerCAmelCase : str = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = TFDeiTForMaskedImageModeling(config=snake_case__ )
lowerCAmelCase : int = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase : int = 1
lowerCAmelCase : Optional[int] = TFDeiTForMaskedImageModeling(snake_case__ )
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : Any = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Tuple = self.type_sequence_label_size
lowerCAmelCase : str = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase : List[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase : Any = 1
lowerCAmelCase : Any = TFDeiTForImageClassification(snake_case__ )
lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : Tuple = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : int = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Optional[Any] = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_lowerCamelCase : Any = False
_lowerCamelCase : str = False
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Dict = False
def lowercase ( self ):
lowerCAmelCase : List[str] = TFDeiTModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def lowercase ( self ):
pass
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Any = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) )
def lowercase ( self ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Tuple = model_class(snake_case__ )
lowerCAmelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Tuple = [*signature.parameters.keys()]
lowerCAmelCase : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__=False ):
lowerCAmelCase : List[str] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase ( self ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Tuple = TFDeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowercase ( self ):
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def lowercase ( self ):
lowerCAmelCase : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCAmelCase : Tuple = self.default_image_processor
lowerCAmelCase : List[str] = prepare_img()
lowerCAmelCase : Tuple = image_processor(images=snake_case__ , return_tensors='tf' )
# forward pass
lowerCAmelCase : int = model(**snake_case__ )
# verify the logits
lowerCAmelCase : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : Tuple = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    """Return the sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
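# Sanity check (illustrative, not part of the original file):
# 4150 = 4**5 + 1**5 + 5**5 + 0**5, so it is a fixed point of the digit-power map.
assert digits_fifth_powers_sum(4150) == 4150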
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
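# Usage sketch (illustrative values, not from the original file): the per-modality
# layer counts are exposed through `num_hidden_layers` as a dict, e.g.
#   config = LxmertConfig(l_layers=3, x_layers=2, r_layers=2)
#   config.num_hidden_layers  # {'vision': 2, 'cross_encoder': 2, 'language': 3}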
'''simple docstring'''
def is_palindrome(head):
    """Two-pointer approach: split at the middle, reverse the second half, compare."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    """Push the second half onto a stack and compare while popping."""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    """Record the positions of each value and check that they mirror around the middle."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
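# Minimal demo (the file defines no node class; this `ListNode` is a hypothetical
# stand-in providing the `val`/`next` attributes the functions above expect):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None
def build_list(values):
    head = tail = None
    for value in values:
        node = ListNode(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
assert is_palindrome(build_list([1, 2, 2, 1])) is True
assert is_palindrome_stack(build_list([1, 2, 3])) is False
assert is_palindrome_dict(build_list([1, 2, 1])) is True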
'''simple docstring'''
class PrefixSum:
    def __init__(self, array):
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start, end):
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum):
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
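# Usage sketch (class and method names as reconstructed above):
_demo = PrefixSum([1, 2, 3, 4])  # prefix sums: [1, 3, 6, 10]
assert _demo.get_sum(1, 3) == 9  # 2 + 3 + 4
assert _demo.contains_sum(5) is True  # the contiguous slice 2 + 3 sums to 5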
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
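# Worked example (sanity check): for n = 10 the sum of squares is 385 and the
# squared sum is 55**2 = 3025, so the difference is 3025 - 385 = 2640.
assert solution(10) == 2640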
if __name__ == "__main__":
print(f"""{solution() = }""")
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Map the patch-embedding weights of stage `idx` to the Hugging Face names."""
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    """Map the attention, MLP and LayerNorm weights of block `cnt` in stage `idx`."""
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Map the cls token of stage `idx`."""
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def final():
    """Map the final layernorm and classification head weights."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint to the Hugging Face format."""
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_lowerCAmelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # fmt: off
        self.assertListEqual(
            tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'])
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def lowercase ( self ):
lowerCAmelCase : str = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=snake_case__ , )
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading YAML block (if any) and the remaining body."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---') + 1
        yamlblock = '\n'.join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
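# Example (hypothetical README content): the YAML block between the two '---'
# markers is separated from the Markdown body, e.g.
#   _split_yaml_from_readme('---\nlicense: mit\n---\n# Title')
#   returns ('license: mit', '# Title')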
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme(cls, path):
        with open(path, encoding='utf-8') as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()
    def to_readme(self, path):
        if path.exists():
            with open(path, encoding='utf-8') as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, 'w', encoding='utf-8') as readme_file:
            readme_file.write(full_content)
    def _to_readme(self, readme_content=None):
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content
    @classmethod
    def from_yaml_string(cls, string):
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-', '_') if key.replace('-', '_') in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)
    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_', '-') if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            }, sort_keys=False, allow_unicode=True, encoding='utf-8', ).decode('utf-8')
_lowerCAmelCase : int = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_lowerCAmelCase : str = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
_lowerCAmelCase : Optional[int] = ap.parse_args()
_lowerCAmelCase : Dict = Path(args.readme_filepath)
_lowerCAmelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
'''simple docstring'''
def is_even(number: int) -> bool:
    """Return True if `number` is even, using a bitwise check of the lowest bit."""
    return number & 1 == 0
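# Illustrative checks of the bitwise test: the lowest bit of an even number is 0.
assert is_even(4) is True
assert is_even(7) is False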
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')
METRIC_CONVERSION = {
    'cubicmeter': from_to(1, 1),
    'litre': from_to(0.001, 1000),
    'kilolitre': from_to(1, 1),
    'gallon': from_to(0.00454, 264.172),
    'cubicyard': from_to(0.76455, 1.30795),
    'cubicfoot': from_to(0.028, 35.3147),
    'cup': from_to(0.000236588, 4226.75),
}
def convert_volume(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` from `from_type` to `to_type` using METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ', '.join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ', '.join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
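# Worked example: one cubic metre is 1000 litres, so
# convert_volume(1, 'cubicmeter', 'litre') == 1000.
assert convert_volume(1, 'cubicmeter', 'litre') == 1000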
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes precedence over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
def matching_min_vertex_cover(graph: dict) -> set:
    """Greedy 2-approximation: build a vertex cover from a maximal matching."""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) edge pairs of the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor,
                 vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
                 unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
                 safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .'
            )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder,
            tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head dim is usually a good trade-off between speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set `slice_size` to `None` to disable attention slicing
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio prompt with the speech model
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='pt', sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.'
                )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt',
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
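

# Hedged usage sketch (added; the checkpoint id and the `custom_pipeline` name are
# assumptions, not part of this file): transcribe speech and render the
# transcription with Stable Diffusion.
#
#   import torch
#   from datasets import load_dataset
#   from diffusers import DiffusionPipeline
#
#   ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
#   audio_sample = ds[3]["audio"]
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline id
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   image = pipe(audio_sample["array"], sampling_rate=audio_sample["sampling_rate"]).images[0]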
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(snake_case__ ):
lowerCAmelCase : Tuple = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase : Any = FlaxAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def lowercase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(snake_case__ ):
lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase : int = FlaxAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def lowercase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(snake_case__ )
lowerCAmelCase : str = FlaxBertModel.from_pretrained(snake_case__ )
lowerCAmelCase : List[Any] = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**snake_case__ ):
return model(**snake_case__ )
eval(**snake_case__ ).block_until_ready()
@slow
def lowercase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
lowerCAmelCase : int = AutoTokenizer.from_pretrained(snake_case__ )
lowerCAmelCase : List[str] = FlaxRobertaModel.from_pretrained(snake_case__ )
lowerCAmelCase : str = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**snake_case__ ):
return model(**snake_case__ )
eval(**snake_case__ ).block_until_ready()
def lowercase ( self ):
with self.assertRaisesRegex(
snake_case__ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCAmelCase : Dict = FlaxAutoModel.from_pretrained('bert-base' )
def lowercase ( self ):
with self.assertRaisesRegex(
snake_case__ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCAmelCase : int = FlaxAutoModel.from_pretrained(snake_case__ , revision='aaaaaa' )
def lowercase ( self ):
with self.assertRaisesRegex(
snake_case__ , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
lowerCAmelCase : str = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def lowercase ( self ):
with self.assertRaisesRegex(snake_case__ , 'Use `from_pt=True` to load this model' ):
lowerCAmelCase : Union[str, Any] = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
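

# Hedged note (added): the error messages asserted above also point at the
# intended recovery paths. For example, a PyTorch-only checkpoint can still be
# loaded into Flax by passing `from_pt=True`:
#
#   model = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only", from_pt=True)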
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : List[Any] = LDMTextToImagePipeline
_lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
_lowerCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Optional[int] = False
def lowercase ( self ):
torch.manual_seed(0 )
        lowerCAmelCase : Dict = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCAmelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
lowerCAmelCase : str = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase : str = CLIPTextModel(snake_case__ )
lowerCAmelCase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
if str(snake_case__ ).startswith('mps' ):
lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Optional[Any] = self.get_dummy_components()
lowerCAmelCase : Optional[Any] = LDMTextToImagePipeline(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Tuple = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
lowerCAmelCase : List[Any] = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : int = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Tuple = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[Any] = self.get_inputs(snake_case__ )
lowerCAmelCase : List[Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase : Tuple = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
lowerCAmelCase : int = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : Any = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Optional[int] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = self.get_inputs(snake_case__ )
lowerCAmelCase : Optional[int] = pipe(**snake_case__ ).images[0]
lowerCAmelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
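

# Hedged usage sketch (added; mirrors the slow tests above):
#
#   from diffusers import LDMTextToImagePipeline
#
#   pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
#   image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
#   image.save("squirrel.png")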
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : Tuple = batch_size
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : Any = embeddings_size
lowerCAmelCase : Optional[Any] = hidden_sizes
lowerCAmelCase : List[str] = depths
lowerCAmelCase : Union[str, Any] = is_training
lowerCAmelCase : Any = use_labels
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : str = num_labels
lowerCAmelCase : Dict = scope
lowerCAmelCase : Optional[int] = len(snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Any = self.get_config()
return config, pixel_values
def lowercase ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase ( self , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = FlaxRegNetModel(config=snake_case__ )
lowerCAmelCase : Dict = model(snake_case__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase ( self , snake_case__ , snake_case__ ):
lowerCAmelCase : int = self.num_labels
lowerCAmelCase : List[str] = FlaxRegNetForImageClassification(config=snake_case__ )
lowerCAmelCase : Tuple = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Dict = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_lowerCamelCase : List[str] = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Optional[Any] = False
def lowercase ( self ):
lowerCAmelCase : int = FlaxRegNetModelTester(self )
lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self ):
return
def lowercase ( self ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowercase ( self ):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowercase ( self ):
pass
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : List[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : List[str] = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase ( self ):
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Tuple = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Optional[int] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : List[str] = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = model_class(snake_case__ )
@jax.jit
def model_jitted(snake_case__ , **snake_case__ ):
return model(pixel_values=snake_case__ , **snake_case__ )
with self.subTest('JIT Enabled' ):
lowerCAmelCase : Optional[int] = model_jitted(**snake_case__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase : Union[str, Any] = model_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowercase ( self ):
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
lowerCAmelCase : Optional[Any] = self.default_image_processor
lowerCAmelCase : int = prepare_img()
lowerCAmelCase : Any = image_processor(images=snake_case__ , return_tensors='np' )
lowerCAmelCase : Dict = model(**snake_case__ )
# verify the logits
lowerCAmelCase : Union[str, Any] = (1, 1000)
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : List[str] = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
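

# Hedged usage sketch (added; mirrors the integration test above):
#
#   import jax.numpy as jnp
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=prepare_img(), return_tensors="np")
#   logits = model(**inputs).logits
#   print(model.config.id2label[int(jnp.argmax(logits, axis=-1)[0])])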
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False,
                 adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True,
                 ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
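

# Hedged usage sketch (added): instantiating the configuration and a model from it.
#
#   from transformers import XmodConfig, XmodModel
#
#   config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#   model = XmodModel(config)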
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info('Writing example %d of %d', ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenizing just a space,
                # so only extend when the word produced at least one sub-token.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
                logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
                logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
                logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
                logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
                logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
lowerCAmelCase : Optional[Any] = None
features.append(
InputFeatures(
input_ids=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , label_ids=snake_case__ ) )
return features
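

# Hedged illustration (added; not from the original): the label-alignment scheme
# above keeps a word's label only on its first sub-token. With
# pad_token_label_id = -100, a BIO-tagged pair might expand as:
#
#   words:     ["Hugging",              "Face"]      labels: ["B-ORG", "I-ORG"]
#   tokens:    ["Hu", "##gging",        "Face"]
#   label_ids: [label_map["B-ORG"], -100, label_map["I-ORG"]]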
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use the CrossEntropyLoss ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str,
                     tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str,
                     max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token,
                        sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'),
                        pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use -100 as padding label id so that only real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str,
                     tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str,
                     max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer,
                cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token,
                sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'),
                pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
lowerCAmelCase : List[Any] = tf.data.Dataset.from_generator(
snake_case__ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
lowerCAmelCase : Tuple = tf.data.Dataset.from_generator(
snake_case__ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset
def __len__( self ):
return len(self.features )
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
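

# Hedged usage sketch (added; `NER` is an assumed concrete TokenClassificationTask
# subclass like the one in this example directory's tasks.py):
#
#   dataset = TFTokenClassificationDataset(
#       token_classification_task=NER(),
#       data_dir="./data",
#       tokenizer=tokenizer,
#       labels=labels,
#       model_type="bert",
#       max_seq_length=128,
#   )
#   tf_dataset = dataset.get_dataset().batch(32)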
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line: str) -> str:
    """Return the indentation of `line` (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given indent level, optionally between two prompts."""
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """Wrap a sort key so that case and underscores are ignored when comparing."""

    def _inner(x):
        return key(x).lower().replace('_', '')

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following isort rules: constants, then classes, then functions."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
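

# Hedged example (added) of the ordering implemented above:
#
#   sort_objects(["load_dataset", "DatasetDict", "LOGGER", "_helper"])
#   -> ["LOGGER", "DatasetDict", "_helper", "load_dataset"]
#
# "LOGGER" is an all-uppercase constant, "DatasetDict" a class, and the two
# lowercase names are functions, sorted while ignoring leading underscores.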
def sort_objects_in_import(import_statement: str) -> str:
    """Sort the objects listed in one import statement, which may span several lines."""

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
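

# Hedged before/after illustration (added) for sort_objects_in_import:
#
#   sort_objects_in_import('_import_structure["models"] = ["resnet", "AutoModel", "CONFIG"]')
#   -> '_import_structure["models"] = ["CONFIG", "AutoModel", "resnet"]'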
def sort_imports(init_file, check_only=True):
    """Sort the `_import_structure` blocks of one __init__.py file."""
    with open(init_file, 'r') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:'
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != '\n'.join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {init_file}.")
            with open(init_file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run sort_imports on every __init__.py under PATH_TO_DIFFUSERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_lowerCAmelCase : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
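
# Hedged sketch (added) of the expected record shape in biencoder-nq-dev.json:
#
#   [
#     {"question": "who sang ...",
#      "positive_ctxs": [{"title": "...", "text": "..."}, ...],
#      ...},
#     ...
#   ]
#
# Each question is written to --evaluation_set (one per line) and the tab-joined
# titles of its positive contexts to --gold_data_path.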
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=64 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Optional[Any] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Dict = use_input_mask
lowerCAmelCase : Tuple = use_token_type_ids
lowerCAmelCase : int = use_labels
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Optional[Any] = embedding_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[str] = type_sequence_label_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Any = num_labels
lowerCAmelCase : str = num_choices
lowerCAmelCase : int = scope
def lowercase ( self ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = MobileBertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[int] = model(snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = MobileBertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForNextSentencePrediction(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , next_sentence_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = self.num_labels
lowerCAmelCase : List[Any] = MobileBertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : int = MobileBertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = self.num_choices
lowerCAmelCase : Any = MobileBertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : List[str] = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Tuple = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : str = True
    def lowercase ( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            # MODEL_FOR_PRETRAINING_MAPPING is assumed to be imported at the top of this test file
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def lowercase ( self ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
def _long_tensor ( tok_lst ) -> Optional[int]:
    """simple docstring"""
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
                    [-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
                    [2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
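# A standalone sketch of the ratio-based comparison used above (the tensor
# values below are made up for illustration; only TOLERANCE comes from this
# file):
#
#   expected = torch.tensor([1.0e8, 2.0, 3.0e-1])
#   observed = torch.tensor([1.0000001e8, 2.0, 3.0e-1])
#   ratio = expected / observed
#   assert torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)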
| 646
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowerCAmelCase : str = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_model_names = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"""funnel-transformer/{name}""": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class lowerCAmelCase ( a ):
_lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
_lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Dict = FunnelTokenizer
_lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : int = 2
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="<unk>" , snake_case__="<sep>" , snake_case__="<pad>" , snake_case__="<cls>" , snake_case__="<mask>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__=True , snake_case__=True , snake_case__=None , snake_case__="##" , **snake_case__ , ):
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , clean_text=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , wordpieces_prefix=snake_case__ , **snake_case__ , )
lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , snake_case__ ) != do_lower_case
or normalizer_state.get('strip_accents' , snake_case__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , snake_case__ ) != tokenize_chinese_chars
):
lowerCAmelCase : List[str] = getattr(snake_case__ , normalizer_state.pop('type' ) )
lowerCAmelCase : int = do_lower_case
lowerCAmelCase : Union[str, Any] = strip_accents
lowerCAmelCase : Dict = tokenize_chinese_chars
lowerCAmelCase : Any = normalizer_class(**snake_case__ )
lowerCAmelCase : List[Any] = do_lower_case
    def lowercase ( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def lowercase ( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def lowercase ( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
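# Illustrative sketch of the Funnel convention implemented above (token ids are
# hypothetical; in the public API the second method is named
# create_token_type_ids_from_sequences): the [CLS] slot gets
# `cls_token_type_id` (2), sequence A gets segment id 0, sequence B gets 1.
#
#   tokenizer.create_token_type_ids_from_sequences([7, 8], [9])
#   -> [2, 0, 0, 0, 1, 1]   # [CLS] A A [SEP] B [SEP]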
| 717
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings ( idx ) -> int:
    """simple docstring"""
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention ( idx , cnt ) -> Any:
    """simple docstring"""
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token ( idx ) -> Optional[int]:
    """simple docstring"""
    token = []
    token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
    return token
def final ( ) -> int:
    """simple docstring"""
    head = []
    head.append(('layernorm.weight', 'norm.weight') )
    head.append(('layernorm.bias', 'norm.bias') )
    head.append(('classifier.weight', 'head.weight') )
    head.append(('classifier.bias', 'head.bias') )
    return head
def convert_cvt_checkpoint ( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ) -> Optional[int]:
    """simple docstring"""
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 10_00
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [1_92, 7_68, 10_24]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('cpu' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
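# Example invocation (script name and file paths are placeholders):
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-13 \
#       --image_size 224 \
#       --cvt_file_name cvtmodels/CvT-13-224x224-IN-1k.pth \
#       --pytorch_dump_folder_path ./cvt-13-224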
| 646
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase ( a ):
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ):
lowerCAmelCase : Tuple = parent
lowerCAmelCase : Any = batch_size
lowerCAmelCase : Union[str, Any] = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : List[str] = use_input_lengths
lowerCAmelCase : str = use_token_type_ids
lowerCAmelCase : Tuple = use_labels
lowerCAmelCase : List[str] = gelu_activation
lowerCAmelCase : str = sinusoidal_embeddings
lowerCAmelCase : str = causal
lowerCAmelCase : Dict = asm
lowerCAmelCase : List[str] = n_langs
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = n_special
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : Optional[int] = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : List[Any] = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Optional[int] = max_position_embeddings
lowerCAmelCase : List[str] = type_vocab_size
lowerCAmelCase : Optional[int] = type_sequence_label_size
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Optional[int] = num_labels
lowerCAmelCase : Tuple = num_choices
lowerCAmelCase : Optional[int] = summary_type
lowerCAmelCase : Optional[Any] = use_proj
lowerCAmelCase : str = scope
def lowercase ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase ( self ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
lowerCAmelCase : int = FlaubertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(snake_case__ , lengths=snake_case__ , langs=snake_case__ )
lowerCAmelCase : int = model(snake_case__ , langs=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
lowerCAmelCase : str = FlaubertWithLMHeadModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
lowerCAmelCase : Optional[Any] = FlaubertForQuestionAnsweringSimple(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : int = model(snake_case__ )
lowerCAmelCase : List[Any] = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
lowerCAmelCase : Dict = FlaubertForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Any = model(snake_case__ )
lowerCAmelCase : List[str] = model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , )
lowerCAmelCase : List[Any] = model(
snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , )
        (total_loss,) = result_with_labels.to_tuple()
lowerCAmelCase : Tuple = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
lowerCAmelCase : str = FlaubertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(snake_case__ )
lowerCAmelCase : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
lowerCAmelCase : Optional[Any] = self.num_labels
lowerCAmelCase : Any = FlaubertForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : int = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
lowerCAmelCase : Optional[Any] = self.num_choices
lowerCAmelCase : Any = FlaubertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : Union[str, Any] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Any = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def lowercase ( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def lowercase ( self ):
        self.model_tester = FlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ )
@slow
def lowercase ( self ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Union[str, Any] = FlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
@require_torch_gpu
def lowercase ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'traced_model.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp , 'traced_model.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
        model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 718
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class lowerCAmelCase ( a ):
_lowerCamelCase : List[str] = """xlm-roberta"""
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
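# Sketch of what the ONNX input mapping above resolves to for the
# multiple-choice task (the instantiation details are illustrative):
#
#   onnx_config.inputs
#   -> OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}),
#                   ('attention_mask', {0: 'batch', 1: 'choice', 2: 'sequence'})])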
| 646
| 0
|
'''simple docstring'''
def is_even ( number ) -> bool:
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
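# Quick sanity check of the predicate above: `number & 1` isolates the lowest
# bit, which is 0 exactly for even integers (including negatives in two's
# complement representation):
#
#   is_even(0)  -> True
#   is_even(7)  -> False
#   is_even(-2) -> True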
| 719
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def parse_args ( ) -> Any:
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
    parser.add_argument(
        '--dataset_name' , type=str , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
    parser.add_argument(
        '--dataset_config' , type=str , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
    parser.add_argument(
        '--tokenizer_name_or_path' , type=str , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
    parser.add_argument(
        '--shard_size' , type=int , default=10_00 , help='Number of entries to go in a single shard.' , )
    parser.add_argument('--split' , type=str , default='train' , choices=['train', 'test', 'validation'] )
    parser.add_argument(
        '--limit' , default=None , type=int , help='Limit the number of shards (used for debugging).' , )
    parser.add_argument(
        '--max_length' , type=int , default=5_12 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.' , )
    parser.add_argument(
        '--output_dir' , default='tf-tpu' , type=str , help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
    return args
def tokenize_function ( tokenizer ) -> int:
    """simple docstring"""
    def fn(examples ):
        return tokenizer(examples['text'] )
    return fn
def get_serialized_examples ( tokenized_data ) -> int:
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data['input_ids'] ) ):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i] ) ),
        }
        example_features = tf.train.Features(feature=features )
        example = tf.train.Example(features=example_features )
        serialized = example.SerializeToString()
        records.append(serialized )
    return records
def main ( args ) -> Union[str, Any]:
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=10_00 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'] )
        filename = os.path.join(split_dir , F"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('Wrote file {} containing {} records'.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"split-{args.split}-records-count.txt" , 'w' ) as f:
        print(F"Total {args.split} records: {total_records}" , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
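# Reading one of the written shards back (sketch; the feature spec mirrors the
# writer above, and the shard path is a placeholder):
#
#   feature_spec = {
#       'input_ids': tf.io.VarLenFeature(tf.int64),
#       'attention_mask': tf.io.VarLenFeature(tf.int64),
#   }
#   ds = tf.data.TFRecordDataset('tf-tpu/train/dataset-0-1000.tfrecord')
#   ds = ds.map(lambda rec: tf.io.parse_single_example(rec, feature_spec))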
| 646
| 0
|
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCAmelCase ( a ):
    def __init__( self , generator , features = None , cache_dir = None , keep_in_memory = False , streaming = False , gen_kwargs = None , num_proc = None , **kwargs , ):
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )
    def lowercase ( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
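# In the public `datasets` API this reader backs `Dataset.from_generator`; a
# minimal usage sketch (generator contents are illustrative):
#
#   from datasets import Dataset
#   def gen():
#       yield {'text': 'hello'}
#       yield {'text': 'world'}
#   ds = Dataset.from_generator(gen)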
| 720
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase ( _A : Any , _A : Dict , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
hf_model.apply_weight_norm()
lowerCAmelCase : int = checkpoint['input_conv.weight_g']
lowerCAmelCase : Optional[int] = checkpoint['input_conv.weight_v']
lowerCAmelCase : Dict = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase : Optional[Any] = checkpoint[F"upsamples.{i}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.weight_v"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
lowerCAmelCase : Optional[Any] = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
lowerCAmelCase : List[Any] = checkpoint['output_conv.1.weight_g']
lowerCAmelCase : List[str] = checkpoint['output_conv.1.weight_v']
lowerCAmelCase : Optional[Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCamelCase ( _A : Dict , _A : Union[str, Any] , _A : List[Any] , _A : Any=None , _A : Any=None , ) -> Dict:
"""simple docstring"""
if config_path is not None:
lowerCAmelCase : Dict = SpeechTaHifiGanConfig.from_pretrained(_A )
else:
lowerCAmelCase : Union[str, Any] = SpeechTaHifiGanConfig()
lowerCAmelCase : List[Any] = SpeechTaHifiGan(_A )
lowerCAmelCase : List[str] = torch.load(_A )
load_weights(orig_checkpoint['model']['generator'] , _A , _A )
lowerCAmelCase : Tuple = np.load(_A )
lowerCAmelCase : List[Any] = stats[0].reshape(-1 )
lowerCAmelCase : int = stats[1].reshape(-1 )
lowerCAmelCase : Union[str, Any] = torch.from_numpy(_A ).float()
lowerCAmelCase : int = torch.from_numpy(_A ).float()
model.save_pretrained(_A )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
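# Example invocation (script name and file paths are placeholders):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan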
| 646
| 0
|
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self , snake_case__ , snake_case__=2 , snake_case__=56 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=2 , snake_case__=2 , snake_case__=7 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=4 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=2 , snake_case__=3 , ):
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : Any = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : List[Any] = use_attention_mask
lowerCAmelCase : List[Any] = use_token_type_ids
lowerCAmelCase : Optional[Any] = use_labels
lowerCAmelCase : Any = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : List[Any] = type_vocab_size
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : int = num_choices
lowerCAmelCase : Optional[int] = rescale_embeddings
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = use_bias
lowerCAmelCase : Optional[Any] = block_size
lowerCAmelCase : Dict = num_random_blocks
def lowercase ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowercase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : int = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
_lowerCamelCase : Tuple = False
_lowerCamelCase : str = False
def lowercase ( self ):
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase ( self ):
super().test_hidden_states_output()
@slow
def lowercase ( self ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(snake_case__ )
def lowercase ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : int = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase : int = model_class(snake_case__ )
@jax.jit
def model_jitted(snake_case__ , snake_case__=None , **snake_case__ ):
return model(input_ids=snake_case__ , attention_mask=snake_case__ , **snake_case__ )
with self.subTest('JIT Enabled' ):
lowerCAmelCase : Tuple = model_jitted(**snake_case__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase : List[str] = model_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=1e-5 , snake_case__="outputs" , snake_case__=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version
        # an effort was made to return `attention_probs` (yet to be verified).
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
| 721
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def lowercase ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ = CHRF.CHAR_ORDER , snake_case__ = CHRF.WORD_ORDER , snake_case__ = CHRF.BETA , snake_case__ = False , snake_case__ = False , snake_case__ = False , ):
lowerCAmelCase : List[str] = len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
lowerCAmelCase : List[str] = [[refs[i] for refs in references] for i in range(snake_case__ )]
lowerCAmelCase : Union[str, Any] = CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Dict = sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
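# Illustration of the reference transposition performed above (hypothetical
# strings): per-prediction reference lists are rearranged into per-position
# reference streams, which is the layout sacrebleu's corpus_score expects.
#   references  = [["ref A1", "ref A2"], ["ref B1", "ref B2"]]
#   transformed = [[refs[i] for refs in references] for i in range(2)]
#   # -> [["ref A1", "ref B1"], ["ref A2", "ref B2"]]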
| 646
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __UpperCamelCase ( _A : List[Any]="" ) -> str:
"""simple docstring"""
lowerCAmelCase : Tuple = tempfile.mkdtemp()
return os.path.join(_A , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
lowerCAmelCase : Tuple = AgentAudio(snake_case__ )
lowerCAmelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(snake_case__ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(snake_case__ ) )
# Ensure that the file contains the same value as the original tensor
lowerCAmelCase : int = sf.read(snake_case__ )
self.assertTrue(torch.allclose(snake_case__ , torch.tensor(snake_case__ ) , atol=1e-4 ) )
def lowercase ( self ):
lowerCAmelCase : Tuple = torch.rand(12 , dtype=torch.floataa ) - 0.5
lowerCAmelCase : str = get_new_path(suffix='.wav' )
sf.write(snake_case__ , snake_case__ , 1_6000 )
lowerCAmelCase : List[str] = AgentAudio(snake_case__ )
self.assertTrue(torch.allclose(snake_case__ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , snake_case__ )
@require_vision
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
lowerCAmelCase : str = torch.randint(0 , 256 , (64, 64, 3) )
lowerCAmelCase : List[Any] = AgentImage(snake_case__ )
lowerCAmelCase : int = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(snake_case__ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(snake_case__ ) )
def lowercase ( self ):
lowerCAmelCase : List[str] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
lowerCAmelCase : int = Image.open(snake_case__ )
lowerCAmelCase : Optional[int] = AgentImage(snake_case__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(snake_case__ ) )
def lowercase ( self ):
lowerCAmelCase : Any = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
lowerCAmelCase : Optional[Any] = Image.open(snake_case__ )
lowerCAmelCase : str = AgentImage(snake_case__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(snake_case__ ) )
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
lowerCAmelCase : Optional[int] = 'Hey!'
lowerCAmelCase : int = AgentText(snake_case__ )
self.assertEqual(snake_case__ , agent_type.to_string() )
self.assertEqual(snake_case__ , agent_type.to_raw() )
self.assertEqual(snake_case__ , snake_case__ )
| 700
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Union[str, Any] = """open-llama"""
def __init__( self , snake_case__=10_0000 , snake_case__=4096 , snake_case__=1_1008 , snake_case__=32 , snake_case__=32 , snake_case__="silu" , snake_case__=2048 , snake_case__=0.0_2 , snake_case__=1e-6 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=0.1 , snake_case__=0.1 , snake_case__=True , snake_case__=True , snake_case__=None , **snake_case__ , ):
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Dict = kwargs.pop(
'use_memorry_efficient_attention' , snake_case__ )
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_dropout_prob
lowerCAmelCase : Union[str, Any] = use_stable_embedding
lowerCAmelCase : Tuple = shared_input_output_embedding
lowerCAmelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
def lowercase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f"got {self.rope_scaling}" )
lowerCAmelCase : List[Any] = self.rope_scaling.get('type' , snake_case__ )
lowerCAmelCase : List[str] = self.rope_scaling.get('factor' , snake_case__ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(snake_case__ , snake_case__ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
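# A `rope_scaling` value that passes the validation above (illustrative numbers):
#   {"type": "linear", "factor": 2.0}
# Values the validator rejects:
#   {"type": "cubic", "factor": 2.0}   # type not in ["linear", "dynamic"]
#   {"type": "linear", "factor": 1.0}  # factor must be a float strictly > 1
#   {"type": "linear"}                 # must be a dict with exactly two fields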
| 646
| 0
|
from bisect import bisect
from itertools import accumulate
def __UpperCamelCase ( _A : Optional[Any] , _A : Tuple , _A : Dict , _A : List[Any] ) -> int:
"""simple docstring"""
lowerCAmelCase : str = sorted(zip(_A , _A ) , key=lambda _A : x[0] / x[1] , reverse=_A )
    lowerCAmelCase , lowerCAmelCase : Tuple = [i[0] for i in r], [i[1] for i in r]
lowerCAmelCase : Optional[int] = list(accumulate(_A ) )
lowerCAmelCase : Optional[int] = bisect(_A , _A )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
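# For reference, the same greedy fractional-knapsack idea with descriptive
# names (a sketch with identifiers of my own choosing, reusing the `bisect`
# and `accumulate` imports above): sort items by value density, take whole
# items while they fit, then a fraction of the first item that does not.
def fractional_knapsack(values: list, weights: list, capacity: float, n: int) -> float:
    ranked = sorted(zip(values, weights), key=lambda item: item[0] / item[1], reverse=True)
    vals = [item[0] for item in ranked]
    wts = [item[1] for item in ranked]
    acc = list(accumulate(wts))  # prefix sums of the sorted weights
    k = bisect(acc, capacity)  # number of whole items that fit
    if k == 0:
        return 0
    if k == n:
        return sum(vals)
    return sum(vals[:k]) + (capacity - acc[k - 1]) * vals[k] / wts[k]
# fractional_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0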
| 701
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Any = """deformable_detr"""
_lowerCamelCase : List[str] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , snake_case__=True , snake_case__=None , snake_case__=3 , snake_case__=300 , snake_case__=1024 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0_2 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__="resnet50" , snake_case__=True , snake_case__=False , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=False , snake_case__=300 , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.2_5 , snake_case__=False , **snake_case__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = backbone_config.get('model_type' )
lowerCAmelCase : str = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[Any] = config_class.from_dict(snake_case__ )
lowerCAmelCase : Union[str, Any] = use_timm_backbone
lowerCAmelCase : List[Any] = backbone_config
lowerCAmelCase : Any = num_channels
lowerCAmelCase : Tuple = num_queries
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : int = d_model
lowerCAmelCase : List[str] = encoder_ffn_dim
lowerCAmelCase : List[str] = encoder_layers
lowerCAmelCase : int = encoder_attention_heads
lowerCAmelCase : str = decoder_ffn_dim
lowerCAmelCase : str = decoder_layers
lowerCAmelCase : Dict = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : List[str] = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Any = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = auxiliary_loss
lowerCAmelCase : Optional[Any] = position_embedding_type
lowerCAmelCase : List[str] = backbone
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : int = dilation
# deformable attributes
lowerCAmelCase : List[str] = num_feature_levels
lowerCAmelCase : List[str] = encoder_n_points
lowerCAmelCase : Union[str, Any] = decoder_n_points
lowerCAmelCase : Tuple = two_stage
lowerCAmelCase : Dict = two_stage_num_proposals
lowerCAmelCase : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowerCAmelCase : Union[str, Any] = class_cost
lowerCAmelCase : Dict = bbox_cost
lowerCAmelCase : List[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : Dict = mask_loss_coefficient
lowerCAmelCase : Any = dice_loss_coefficient
lowerCAmelCase : str = bbox_loss_coefficient
lowerCAmelCase : Tuple = giou_loss_coefficient
lowerCAmelCase : List[str] = eos_coefficient
lowerCAmelCase : Any = focal_alpha
lowerCAmelCase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase ( self ):
return self.encoder_attention_heads
@property
def lowercase ( self ):
return self.d_model
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
lowerCAmelCase : str = self.__class__.model_type
return output
| 646
| 0
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
set_seed(770)
_lowerCAmelCase : Optional[int] = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
_lowerCAmelCase : int = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
_lowerCAmelCase : Any = os.path.dirname(os.path.abspath(__file__))
_lowerCAmelCase : List[Any] = os.path.join(os.path.expanduser('~'), '.cache')
_lowerCAmelCase : Optional[int] = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def __UpperCamelCase ( _A : List[str] , _A : List[Any]=False ) -> str:
"""simple docstring"""
lowerCAmelCase : Dict = model_type
if use_small:
key += "_small"
return os.path.join(_A , REMOTE_MODEL_PATHS[key]['file_name'] )
def __UpperCamelCase ( _A : Union[str, Any] , _A : List[str] ) -> List[str]:
"""simple docstring"""
os.makedirs(_A , exist_ok=_A )
hf_hub_download(repo_id=_A , filename=_A , local_dir=_A )
def __UpperCamelCase ( _A : str , _A : Union[str, Any] , _A : Any=False , _A : Union[str, Any]="text" ) -> Optional[int]:
"""simple docstring"""
if model_type == "text":
lowerCAmelCase : int = BarkSemanticModel
lowerCAmelCase : Optional[Any] = BarkSemanticConfig
lowerCAmelCase : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCAmelCase : str = BarkCoarseModel
lowerCAmelCase : Dict = BarkCoarseConfig
lowerCAmelCase : Optional[int] = BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCAmelCase : Optional[int] = BarkFineModel
lowerCAmelCase : str = BarkFineConfig
lowerCAmelCase : str = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowerCAmelCase : Optional[Any] = F"{model_type}_small" if use_small else model_type
lowerCAmelCase : Any = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_A ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info['repo_id'] , model_info['file_name'] )
lowerCAmelCase : Optional[int] = torch.load(_A , map_location=_A )
# this is a hack
lowerCAmelCase : List[Any] = checkpoint['model_args']
if "input_vocab_size" not in model_args:
lowerCAmelCase : Any = model_args['vocab_size']
lowerCAmelCase : List[str] = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCAmelCase : Any = model_args.pop('n_head' )
lowerCAmelCase : Tuple = model_args.pop('n_embd' )
lowerCAmelCase : List[str] = model_args.pop('n_layer' )
lowerCAmelCase : Tuple = ConfigClass(**checkpoint['model_args'] )
lowerCAmelCase : List[Any] = ModelClass(config=_A )
lowerCAmelCase : Union[str, Any] = GenerationConfigClass()
lowerCAmelCase : Dict = model_generation_config
lowerCAmelCase : List[Any] = checkpoint['model']
# fixup checkpoint
lowerCAmelCase : Any = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(_A ):
# replace part of the key with corresponding layer name in HF implementation
lowerCAmelCase : Union[str, Any] = k[len(_A ) :]
for old_layer_name in new_layer_name_dict:
lowerCAmelCase : int = new_k.replace(_A , new_layer_name_dict[old_layer_name] )
lowerCAmelCase : Dict = state_dict.pop(_A )
lowerCAmelCase : Any = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCAmelCase : str = {k for k in extra_keys if not k.endswith('.attn.bias' )}
lowerCAmelCase : int = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCAmelCase : Optional[int] = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(_A ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(_A ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(_A , strict=_A )
lowerCAmelCase : Tuple = model.num_parameters(exclude_embeddings=_A )
lowerCAmelCase : int = checkpoint['best_val_loss'].item()
logger.info(F"model loaded: {round(n_params/1e6 , 1 )}M params, {round(_A , 3 )} loss" )
model.eval()
model.to(_A )
del checkpoint, state_dict
return model
def __UpperCamelCase ( _A : int , _A : List[str]=False , _A : Union[str, Any]="text" ) -> Optional[Any]:
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCAmelCase : str = 'cpu' # do conversion on cpu
lowerCAmelCase : str = _get_ckpt_path(_A , use_small=_A )
lowerCAmelCase : List[Any] = _load_model(_A , _A , model_type=_A , use_small=_A )
# load bark initial model
lowerCAmelCase : Any = _bark_load_model(_A , 'cpu' , model_type=_A , use_small=_A )
if model_type == "text":
lowerCAmelCase : Optional[Any] = bark_model['model']
if model.num_parameters(exclude_embeddings=_A ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
lowerCAmelCase : Union[str, Any] = 5
lowerCAmelCase : Optional[Any] = 10
if model_type in ["text", "coarse"]:
lowerCAmelCase : int = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
lowerCAmelCase : Dict = bark_model(_A )[0]
lowerCAmelCase : int = model(_A )
# take last logits
lowerCAmelCase : Tuple = output_new_model_total.logits[:, [-1], :]
else:
lowerCAmelCase : int = 3
lowerCAmelCase : int = 8
lowerCAmelCase : List[str] = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowerCAmelCase : Optional[Any] = model(_A , _A )
lowerCAmelCase : int = bark_model(_A , _A )
lowerCAmelCase : Any = output_new_model_total.logits
    # any output difference should come from the difference in self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
def __UpperCamelCase ( _A : str , _A : Union[str, Any] , _A : Any , _A : Optional[Any] , _A : List[str] , _A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = os.path.join(_A , _A )
lowerCAmelCase : Dict = BarkSemanticConfig.from_pretrained(os.path.join(_A , 'config.json' ) )
lowerCAmelCase : int = BarkCoarseConfig.from_pretrained(os.path.join(_A , 'config.json' ) )
lowerCAmelCase : Tuple = BarkFineConfig.from_pretrained(os.path.join(_A , 'config.json' ) )
lowerCAmelCase : str = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
lowerCAmelCase : int = BarkSemanticModel.from_pretrained(_A )
lowerCAmelCase : Optional[int] = BarkCoarseModel.from_pretrained(_A )
lowerCAmelCase : str = BarkFineModel.from_pretrained(_A )
lowerCAmelCase : List[str] = EncodecModel.from_pretrained('facebook/encodec_24khz' )
lowerCAmelCase : Optional[Any] = BarkConfig.from_sub_model_configs(
_A , _A , _A , _A )
lowerCAmelCase : int = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowerCAmelCase : Dict = BarkModel(_A )
lowerCAmelCase : int = semantic
lowerCAmelCase : Union[str, Any] = coarseAcoustic
lowerCAmelCase : int = fineAcoustic
lowerCAmelCase : Dict = codec
lowerCAmelCase : List[Any] = bark_generation_config
Path(_A ).mkdir(exist_ok=_A )
bark.save_pretrained(_A , repo_id=_A , push_to_hub=_A )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
_lowerCAmelCase : str = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
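# Example invocation (the script file name is assumed; the positional and
# optional arguments follow the argparse definition above):
#   python convert_bark_checkpoint.py text ./bark_text_hf --is_small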
| 702
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : str = PegasusTokenizer
_lowerCamelCase : Union[str, Any] = PegasusTokenizerFast
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = True
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : List[Any] = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Optional[int] = '</s>'
lowerCAmelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(snake_case__ ) , 1103 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowerCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase : List[str] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowerCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase : List[Any] = 'To ensure a smooth flow of bank resolutions.'
lowerCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Any = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = ['This is going to be way too long.' * 150, 'short example']
lowerCAmelCase : int = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : Dict = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : Dict = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self ):
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Optional[Any] = PegasusTokenizer
_lowerCamelCase : str = PegasusTokenizerFast
_lowerCamelCase : Tuple = True
_lowerCamelCase : int = True
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : int = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[str] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowerCAmelCase : Dict = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def lowercase ( self ):
lowerCAmelCase : Optional[int] = ['This is going to be way too long.' * 1000, 'short example']
lowerCAmelCase : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : List[str] = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : List[str] = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def lowercase ( self ):
lowerCAmelCase : List[str] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowerCAmelCase : Tuple = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 646
| 0
|
'''simple docstring'''
import numpy as np
class lowerCAmelCase :
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None ):
self.set_matricies(red=snake_case__ , green=snake_case__ , blue=snake_case__ , red_edge=snake_case__ , nir=snake_case__ )
def lowercase ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None ):
if red is not None:
lowerCAmelCase : List[Any] = red
if green is not None:
lowerCAmelCase : Tuple = green
if blue is not None:
lowerCAmelCase : Optional[int] = blue
if red_edge is not None:
lowerCAmelCase : Tuple = red_edge
if nir is not None:
lowerCAmelCase : Any = nir
return True
def lowercase ( self , snake_case__="" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None ):
self.set_matricies(red=snake_case__ , green=snake_case__ , blue=snake_case__ , red_edge=snake_case__ , nir=snake_case__ )
lowerCAmelCase : Any = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def lowercase ( self ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def lowercase ( self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def lowercase ( self ):
return self.nir * (self.red / (self.green**2))
def lowercase ( self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def lowercase ( self ):
return (self.nir - self.red) / (self.nir + self.red)
def lowercase ( self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def lowercase ( self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def lowercase ( self ):
return (self.nir - self.green) / (self.nir + self.green)
def lowercase ( self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def lowercase ( self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def lowercase ( self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def lowercase ( self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def lowercase ( self , snake_case__=0.0_8 , snake_case__=1.2_2 , snake_case__=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def lowercase ( self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def lowercase ( self ):
return (self.nir / self.green) - 1
def lowercase ( self ):
return (self.nir / self.redEdge) - 1
def lowercase ( self ):
return (self.red - self.blue) / self.red
def lowercase ( self ):
lowerCAmelCase : Optional[int] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def lowercase ( self ):
return self.nir - self.green
def lowercase ( self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def lowercase ( self ):
lowerCAmelCase : int = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def lowercase ( self , snake_case__=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def lowercase ( self , snake_case__=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def lowercase ( self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def lowercase ( self , snake_case__=None , snake_case__=None ):
return (self.nir - b) / (a * self.red)
def lowercase ( self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def lowercase ( self ):
return (self.red + self.green + self.blue) / 30.5
def lowercase ( self ):
return self.nir / self.red
def lowercase ( self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def lowercase ( self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def lowercase ( self ):
return self.green / (self.nir + self.red + self.green)
def lowercase ( self ):
return self.nir / (self.nir + self.red + self.green)
def lowercase ( self ):
return self.red / (self.nir + self.red + self.green)
def lowercase ( self ):
return (self.green - self.red) / (self.green + self.red)
def lowercase ( self ):
return (self.red - self.green) / (self.red + self.green)
def lowercase ( self ):
lowerCAmelCase : List[str] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCAmelCase : List[str] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def lowercase ( self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def lowercase ( self ):
return self.nir / self.red
def lowercase ( self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def lowercase ( self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
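# Worked example for the NDVI method above (illustrative reflectance values):
# with nir = 0.8 and red = 0.2, NDVI = (0.8 - 0.2) / (0.8 + 0.2) = 0.6,
# a value typical of dense, healthy vegetation.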
| 703
|
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCamelCase ( _A : np.ndarray , _A : float ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = math.sqrt(_A )
lowerCAmelCase : Union[str, Any] = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCamelCase ( _A : np.ndarray , _A : int , _A : int , _A : int ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : int = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCamelCase ( _A : int , _A : float ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : Dict = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _A ):
for j in range(0 , _A ):
lowerCAmelCase : Optional[int] = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_A , _A )
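# For kernel_size = 3, the distance matrix built above (before vec_gaussian
# is applied) is
#   [[sqrt(2), 1, sqrt(2)],
#    [1,       0, 1      ],
#    [sqrt(2), 1, sqrt(2)]]
# i.e. each cell holds its Euclidean distance from the kernel centre.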
def __UpperCamelCase ( _A : np.ndarray , _A : float , _A : float , _A : int , ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : str = np.zeros(img.shape )
lowerCAmelCase : int = get_gauss_kernel(_A , _A )
lowerCAmelCase , lowerCAmelCase : Dict = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
lowerCAmelCase : int = get_slice(_A , _A , _A , _A )
lowerCAmelCase : Any = img_s - img_s[kernel_size // 2, kernel_size // 2]
lowerCAmelCase : str = vec_gaussian(_A , _A )
lowerCAmelCase : Optional[int] = np.multiply(_A , _A )
lowerCAmelCase : str = np.multiply(_A , _A )
lowerCAmelCase : Union[str, Any] = np.sum(_A ) / np.sum(_A )
lowerCAmelCase : Tuple = val
return imga
def __UpperCamelCase ( _A : list ) -> tuple:
"""simple docstring"""
lowerCAmelCase : List[Any] = args[1] if args[1:] else '../image_data/lena.jpg'
lowerCAmelCase : Any = float(args[2] ) if args[2:] else 1.0
lowerCAmelCase : Union[str, Any] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
lowerCAmelCase : int = int(args[4] )
lowerCAmelCase : Optional[Any] = kernel_size + abs(kernel_size % 2 - 1 )
else:
lowerCAmelCase : Optional[int] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = parse_args(sys.argv)
_lowerCAmelCase : str = cva.imread(filename, 0)
cva.imshow('input image', img)
_lowerCAmelCase : Union[str, Any] = img / 255
_lowerCAmelCase : List[str] = out.astype('float32')
_lowerCAmelCase : Optional[int] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
_lowerCAmelCase : Union[str, Any] = out * 255
_lowerCAmelCase : Optional[Any] = np.uinta(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
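# Example invocation (the script file name is assumed; see parse_args above):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
# argv[1] = image path, argv[2] = spatial variance, argv[3] = intensity
# variance, argv[4] = kernel size (even values are rounded up to odd).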
| 646
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
lowerCAmelCase : Dict = tf.convert_to_tensor(
[
[
8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0
-0.5_6_2_0_0_4_4,
5.2_3_2_2_9_7_5_2,
4.0_3_8_6_3_9_3,
-6.8_7_9_8_3_7_8,
-0.5_4_7_8_5_8_0_2,
-3.2_0_1_2_1_5_3,
2.9_2_7_7_7_1_7_6,
1.8_8_1_7_1_9_5_3,
7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9
8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10
-9.8_5_7_1_1_8_3_6,
-5.9_6_2_0_9_2_3_6,
-1.1_3_0_3_9_1_6_1,
-7.1_1_1_5_2_9_4,
-0.8_3_6_9_6_3_3,
-5.3_1_8_6_4_0_8,
7.0_6_4_2_7_4_0_7,
0.8_1_3_6_9_3_4_4,
-0.8_2_0_2_3_8_1_7,
-5.9_1_7_9_7_9_6,
0.5_8_8_1_3_4_4_3,
-6.9_9_7_7_8_4_3_8,
4.7_1_5_5_1_1_8_9,
-0.1_8_7_7_1_6_3_7,
7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25
9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26
2.1_2_6_6_2_9_4_1,
-9.3_2_5_6_2_0_3_8,
2.3_5_6_5_2_5_2_2,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.5_8_4_2_5_5_1_8,
4.5_3_1_3_9_2_3_8,
-5.5_7_5_1_0_4_6_4,
-6.2_8_0_3_0_6_9_9,
-7.1_9_5_2_9_5_0_3,
-4.0_2_1_2_2_5_5_1,
1.3_9_3_3_7_0_3_7,
-6.0_6_7_0_7_0_5_7,
1.5_9_4_8_0_5_1_7,
-9.6_4_3_1_1_9,
0.0_3_9_0_7_7_9_9,
0.6_7_2_3_1_7_6_2,
-8.8_8_2_0_6_7_2_6,
6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13
2.2_8_5_2_0_7_2_3,
4.8_2_7_6_7_5_0_6,
4.3_0_4_2_1_3_6_8,
8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17
5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18
-4.4_7_3_5_7_9_4,
7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20
-2.9_1_0_5_1_6_6_3,
2.6_1_9_4_6_0_7_7,
-2.5_6_7_4_7_6_2,
-9.4_8_9_5_9_3_0_2,
-4.0_2_9_2_2_6_4_5,
-1.3_5_4_1_6_9_1_8,
9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 27
-5.8_9_4_7_8_5_5_3,
1.8_5_3_7_0_4_6_7,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCAmelCase : str = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowerCAmelCase : Tuple = tf.convert_to_tensor(
[8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowerCAmelCase : Union[str, Any] = tf_top_k_top_p_filtering(snake_case__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowerCAmelCase : List[str] = output[output != -float('inf' )]
lowerCAmelCase : int = tf.cast(
tf.where(tf.not_equal(snake_case__ , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-1_2 )
tf.debugging.assert_equal(snake_case__ , snake_case__ )
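# For orientation, a minimal NumPy sketch of the top-k / top-p (nucleus)
# filtering exercised by the test above: keep the k largest logits plus the
# smallest prefix of the sorted distribution whose probability mass reaches
# top_p, and push everything else to -inf. This is a simplified single-row
# sketch that ignores min_tokens_to_keep; it is not the library implementation.
import numpy as np

def top_k_top_p_filter(logits: np.ndarray, top_k: int = 0, top_p: float = 1.0) -> np.ndarray:
    filtered = logits.astype(np.float64)  # astype returns a copy; original logits untouched
    if top_k > 0:
        kth_largest = np.sort(filtered)[-top_k]
        filtered[filtered < kth_largest] = -np.inf  # drop everything below the k-th value
    if top_p < 1.0:
        order = np.argsort(filtered)[::-1]  # token indices, descending by logit
        probs = np.exp(filtered[order] - filtered[order][0])
        probs /= probs.sum()
        cumulative = np.cumsum(probs)
        # remove a token once the mass *before* it already reaches top_p
        to_remove = order[1:][cumulative[:-1] >= top_p]
        filtered[to_remove] = -np.inf
    return filtered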
@require_tf
class lowerCAmelCase ( unittest.TestCase , a ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
_lowerCamelCase : Optional[Any] = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def lowercase ( self ):
# TF-only test: tf.saved_model export
lowerCAmelCase : int = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase : Union[str, Any] = 2
lowerCAmelCase : List[str] = 2
class lowerCAmelCase ( tf.Module ):
def __init__( self , snake_case__ ):
super(snake_case__ , self ).__init__()
lowerCAmelCase : List[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
) , jit_compile=snake_case__ , )
def lowercase ( self , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.model.generate(
input_ids=snake_case__ , attention_mask=snake_case__ , max_new_tokens=snake_case__ , return_dict_in_generate=snake_case__ , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase : Optional[int] = [[2, 0], [102, 103]]
lowerCAmelCase : List[Any] = [[1, 0], [1, 1]]
lowerCAmelCase : List[Any] = DummyModel(model=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(snake_case__ , snake_case__ , signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase : int = tf.saved_model.load(snake_case__ ).signatures['serving_default']
for batch_size in range(1 , len(snake_case__ ) + 1 ):
lowerCAmelCase : List[str] = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase : Any = serving_func(**snake_case__ )['sequences']
lowerCAmelCase : Optional[int] = test_model.generate(**snake_case__ , max_new_tokens=snake_case__ )
tf.debugging.assert_equal(snake_case__ , snake_case__ )
@slow
def lowercase ( self ):
# TF-only test: tf.saved_model export
lowerCAmelCase : str = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase : int = 1
lowerCAmelCase : Dict = 2
class lowerCAmelCase ( tf.Module ):
def __init__( self , snake_case__ ):
super(snake_case__ , self ).__init__()
lowerCAmelCase : List[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
) , jit_compile=snake_case__ , )
def lowercase ( self , snake_case__ , snake_case__ ):
lowerCAmelCase : int = self.model.generate(
input_ids=snake_case__ , attention_mask=snake_case__ , max_new_tokens=snake_case__ , return_dict_in_generate=snake_case__ , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase : Dict = [[2], [102, 103]]
lowerCAmelCase : Any = [[1], [1, 1]]
lowerCAmelCase : List[Any] = DummyModel(model=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(snake_case__ , snake_case__ , signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase : Any = tf.saved_model.load(snake_case__ ).signatures['serving_default']
for input_row in range(len(snake_case__ ) ):
lowerCAmelCase : int = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase : Optional[Any] = serving_func(**snake_case__ )['sequences']
lowerCAmelCase : Any = test_model.generate(**snake_case__ , max_new_tokens=snake_case__ )
tf.debugging.assert_equal(snake_case__ , snake_case__ )
@slow
@require_tensorflow_text
def lowercase ( self ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=snake_case__ )
class lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__( self ):
super().__init__()
lowerCAmelCase : Optional[int] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(snake_case__ , 'spiece.model' ) , 'rb' ).read() )
lowerCAmelCase : str = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def lowercase ( self , snake_case__ , *snake_case__ , **snake_case__ ):
lowerCAmelCase : List[str] = self.tokenizer.tokenize(snake_case__ )
lowerCAmelCase : Any = text.pad_model_inputs(
snake_case__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
lowerCAmelCase : Optional[int] = self.model.generate(input_ids=snake_case__ , attention_mask=snake_case__ )
return self.tokenizer.detokenize(snake_case__ )
lowerCAmelCase : Optional[Any] = CompleteSentenceTransformer()
lowerCAmelCase : List[str] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
lowerCAmelCase : Any = complete_model(snake_case__ )
lowerCAmelCase : str = tf.keras.Model(snake_case__ , snake_case__ )
keras_model.save(snake_case__ )
def lowercase ( self ):
# Has PT equivalent: this test relies on random sampling
lowerCAmelCase : Optional[Any] = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
lowerCAmelCase : Dict = 14
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase : Tuple = 'Hello, my dog is cute and'
lowerCAmelCase : Union[str, Any] = tokenizer(snake_case__ , return_tensors='tf' )
lowerCAmelCase : int = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase : Union[str, Any] = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowerCAmelCase : str = model.generate(**snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCAmelCase : Union[str, Any] = [638, 198]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowerCAmelCase : Union[str, Any] = model.generate(**snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase ( self ):
# Has PT equivalent: ample use of framework-specific code
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase : Tuple = 'Hugging Face is a technology company based in New York and Paris.'
lowerCAmelCase : Tuple = bart_tokenizer(snake_case__ , return_tensors='tf' ).input_ids
lowerCAmelCase : Optional[Any] = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase : Tuple = bart_model.generate(snake_case__ ).numpy()
class lowerCAmelCase ( a ):
def lowercase ( self , snake_case__ , snake_case__=None , **snake_case__ ):
return super().call(snake_case__ , **snake_case__ )
lowerCAmelCase : Optional[Any] = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase : Union[str, Any] = bart_model.generate(snake_case__ , foo='bar' ).numpy()
self.assertTrue(np.array_equal(snake_case__ , snake_case__ ) )
class lowerCAmelCase ( bart_model.model.encoder.__class__ ):
def lowercase ( self , snake_case__ , **snake_case__ ):
return super().call(snake_case__ , **snake_case__ )
lowerCAmelCase : Union[str, Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
lowerCAmelCase : List[str] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase : Any = bart_model.generate(snake_case__ ).numpy()
with self.assertRaises(snake_case__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(snake_case__ , foo='bar' )
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase : int = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 646
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase : Any = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = ['DeiTFeatureExtractor']
_lowerCAmelCase : Union[str, Any] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705
|
'''simple docstring'''
from typing import Any
class lowerCAmelCase :
def __init__( self , snake_case__ ):
lowerCAmelCase : Optional[int] = data
lowerCAmelCase : Optional[Any] = None
def __repr__( self ):
return f"Node({self.data})"
class lowerCAmelCase :
def __init__( self ):
lowerCAmelCase : Dict = None
def __iter__( self ):
lowerCAmelCase : Optional[Any] = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
lowerCAmelCase : Any = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : List[str] = current.next
lowerCAmelCase : int = data
def lowercase ( self , snake_case__ ):
self.insert_nth(len(self ) , snake_case__ )
def lowercase ( self , snake_case__ ):
self.insert_nth(0 , snake_case__ )
def lowercase ( self , snake_case__ , snake_case__ ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
lowerCAmelCase : List[str] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : int = new_node
elif index == 0:
lowerCAmelCase : List[Any] = self.head # link new_node to head
lowerCAmelCase : List[Any] = new_node
else:
lowerCAmelCase : List[Any] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Any = temp.next
lowerCAmelCase : str = new_node
def lowercase ( self ): # print every node data
print(self )
def lowercase ( self ):
return self.delete_nth(0 )
def lowercase ( self ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def lowercase ( self , snake_case__ = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
lowerCAmelCase : List[str] = self.head # default first node
if index == 0:
lowerCAmelCase : Tuple = self.head.next
else:
lowerCAmelCase : Dict = self.head
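# walk to the node just before the one being removed, then splice it out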
for _ in range(index - 1 ):
lowerCAmelCase : Tuple = temp.next
lowerCAmelCase : Dict = temp.next
lowerCAmelCase : Tuple = temp.next.next
return delete_node.data
def lowercase ( self ):
return self.head is None
def lowercase ( self ):
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Any = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[str] = current.next
# Make the current node's next point backwards
lowerCAmelCase : int = prev
# Make the previous node be the current node
lowerCAmelCase : int = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : Optional[Any] = next_node
# After the loop, prev is the new head of the reversed list
lowerCAmelCase : List[Any] = prev
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(_A ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_A ) == i
linked_list.insert_nth(_A , i + 1 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_A ) == "->".join(str(_A ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_A ) == 9
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_A ) == "->".join(str(_A ) for i in range(-8 , 1 ) )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Optional[int] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
lowerCAmelCase : Dict = LinkedList()
for i in test_input:
linked_list.insert_tail(_A )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_A ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : Optional[Any] = linked_list.delete_head()
assert result == -9
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : List[str] = linked_list.delete_tail()
assert result == 12.2
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node at a specific location in the linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(10 )
assert result is None
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_A )
assert (
str(_A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_A )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_A )
print('\nReading/changing Node data using indexing:' )
print(F"Element at Position 1: {linked_list[1]}" )
lowerCAmelCase : Tuple = input('Enter New Value: ' ).strip()
print('New list:' )
print(_A )
print(F"length of linked_list is : {len(_A )}" )
if __name__ == "__main__":
main()
| 646
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Tuple = """donut-swin"""
_lowerCamelCase : Optional[int] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , snake_case__=224 , snake_case__=4 , snake_case__=3 , snake_case__=96 , snake_case__=[2, 2, 6, 2] , snake_case__=[3, 6, 12, 24] , snake_case__=7 , snake_case__=4.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=False , snake_case__=0.0_2 , snake_case__=1e-5 , **snake_case__ , ):
super().__init__(**snake_case__ )
lowerCAmelCase : List[str] = image_size
lowerCAmelCase : int = patch_size
lowerCAmelCase : str = num_channels
lowerCAmelCase : List[str] = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : Tuple = len(snake_case__ )
lowerCAmelCase : Tuple = num_heads
lowerCAmelCase : Dict = window_size
lowerCAmelCase : Tuple = mlp_ratio
lowerCAmelCase : List[Any] = qkv_bias
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : Dict = drop_path_rate
lowerCAmelCase : int = hidden_act
lowerCAmelCase : Any = use_absolute_embeddings
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Optional[Any] = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
| 706
|
'''simple docstring'''
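# Precompute digit -> digit**5 once so that each per-number digit sum below is a table lookup.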
_lowerCAmelCase : List[str] = {str(digit): digit**5 for digit in range(10)}
def __UpperCamelCase ( _A : int ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_A ) )
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(10_00 , 1_00_00_00 )
if number == digits_fifth_powers_sum(_A ) )
if __name__ == "__main__":
print(solution())
| 646
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
lowerCAmelCase : Optional[Any] = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert ! ("I love camembert!")
lowerCAmelCase : Tuple = model(snake_case__ )['last_hidden_state']
lowerCAmelCase : Optional[int] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
lowerCAmelCase : str = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 707
|
'''simple docstring'''
def __UpperCamelCase ( _A : List[str] ) -> Optional[Any]:
"""simple docstring"""
if not head:
return True
# split the list to two parts
lowerCAmelCase , lowerCAmelCase : str = head.next, head
while fast and fast.next:
lowerCAmelCase : Optional[int] = fast.next.next
lowerCAmelCase : int = slow.next
lowerCAmelCase : int = slow.next
lowerCAmelCase : Optional[Any] = None # detach the first half from the second (the check below still passes without this)
# reverse the second part
lowerCAmelCase : List[Any] = None
while second:
lowerCAmelCase : List[Any] = second.next
lowerCAmelCase : Union[str, Any] = node
lowerCAmelCase : Optional[Any] = second
lowerCAmelCase : Any = nxt
# compare two parts
# the second part has the same number of nodes as the first, or one fewer
while node:
if node.val != head.val:
return False
lowerCAmelCase : Optional[Any] = node.next
lowerCAmelCase : Tuple = head.next
return True
def __UpperCamelCase ( _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
lowerCAmelCase : Optional[int] = head
while fast and fast.next:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = fast.next.next, slow.next
# 2. Push the second half into the stack
lowerCAmelCase : Tuple = [slow.val]
while slow.next:
lowerCAmelCase : Tuple = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
lowerCAmelCase : Union[str, Any] = cur.next
return True
def __UpperCamelCase ( _A : Tuple ) -> Optional[int]:
"""simple docstring"""
if not head or not head.next:
return True
lowerCAmelCase : Optional[int] = {}
lowerCAmelCase : int = 0
while head:
if head.val in d:
d[head.val].append(_A )
else:
lowerCAmelCase : Any = [pos]
lowerCAmelCase : int = head.next
pos += 1
lowerCAmelCase : str = pos - 1
lowerCAmelCase : Optional[Any] = 0
for v in d.values():
if len(_A ) % 2 != 0:
middle += 1
else:
lowerCAmelCase : Any = 0
for i in range(0 , len(_A ) ):
if v[i] + v[len(_A ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
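# Hedged standalone sketch (not from the original file): a minimal node class and
# the reverse-second-half check above in runnable form. All names here are
# illustrative assumptions; the functions above assume nodes exposing `.val`
# and `.next`, which this file never defines.
class _Node:
    def __init__(self, val):
        self.val = val
        self.next = None


def _is_palindrome(head):
    if not head:
        return True
    # find the middle: slow ends at the midpoint
    slow, fast = head, head.next
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None
    # reverse the second half
    node = None
    while second:
        second.next, node, second = node, second, second.next
    # compare halves (the second half has the same length or one less)
    while node:
        if node.val != head.val:
            return False
        node, head = node.next, head.next
    return True


if __name__ == "__main__":
    head = None
    for v in reversed([1, 2, 2, 1]):
        n = _Node(v)
        n.next = head
        head = n
    assert _is_palindrome(head)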
| 646
| 0
|
'''simple docstring'''
from manim import *
class lowerCAmelCase ( a ):
def lowercase ( self ):
lowerCAmelCase : int = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : List[str] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowerCAmelCase : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : str = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : Any = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : str = VGroup(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : Dict = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case__ )
lowerCAmelCase : str = [mem.copy() for i in range(1 )]
lowerCAmelCase : Any = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : Optional[int] = Text('GPU' , font_size=24 )
lowerCAmelCase : List[Any] = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ )
gpu.align_to(snake_case__ , snake_case__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case__ )
lowerCAmelCase : Dict = [mem.copy() for i in range(6 )]
lowerCAmelCase : Dict = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : Tuple = Text('Model' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case__ , run_time=1 ) , Create(snake_case__ , run_time=1 ) , Create(snake_case__ , run_time=1 ) , )
lowerCAmelCase : Tuple = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : List[Any] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case__ , run_time=2.5 ) , Write(snake_case__ ) , Write(snake_case__ ) )
self.add(snake_case__ )
lowerCAmelCase : List[str] = []
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : List[Any] = []
for i, rect in enumerate(snake_case__ ):
lowerCAmelCase : List[str] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(snake_case__ , opacity=0.7 )
cpu_target.move_to(snake_case__ )
cpu_target.generate_target()
lowerCAmelCase : str = 0.4_6 / 4
lowerCAmelCase : Optional[Any] = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=snake_case__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case__ , buff=0.0 )
cpu_targs.append(snake_case__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case__ ) )
second_animations.append(MoveToTarget(snake_case__ , run_time=1.5 ) )
self.play(*snake_case__ )
self.play(*snake_case__ )
self.wait()
| 708
|
'''simple docstring'''
import math
def __UpperCamelCase ( _A : int = 1_00 ) -> int:
"""simple docstring"""
lowerCAmelCase : List[Any] = sum(i * i for i in range(1 , n + 1 ) )
lowerCAmelCase : Optional[Any] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 646
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : str = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class lowerCAmelCase ( a ):
_lowerCamelCase : int = """openai-gpt"""
_lowerCamelCase : Optional[int] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , snake_case__=4_0478 , snake_case__=512 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1e-5 , snake_case__=0.0_2 , snake_case__="cls_index" , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=0.1 , **snake_case__ , ):
lowerCAmelCase : str = vocab_size
lowerCAmelCase : Optional[Any] = n_positions
lowerCAmelCase : Dict = n_embd
lowerCAmelCase : Optional[int] = n_layer
lowerCAmelCase : Union[str, Any] = n_head
lowerCAmelCase : List[str] = afn
lowerCAmelCase : Tuple = resid_pdrop
lowerCAmelCase : List[str] = embd_pdrop
lowerCAmelCase : List[Any] = attn_pdrop
lowerCAmelCase : List[Any] = layer_norm_epsilon
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Optional[Any] = summary_type
lowerCAmelCase : List[str] = summary_use_proj
lowerCAmelCase : List[str] = summary_activation
lowerCAmelCase : Optional[Any] = summary_first_dropout
lowerCAmelCase : Union[str, Any] = summary_proj_to_labels
super().__init__(**snake_case__ )
| 709
|
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Tuple = GPTSwaTokenizer
_lowerCamelCase : str = False
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[Any] = False
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : Tuple = GPTSwaTokenizer(snake_case__ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self , snake_case__ ):
lowerCAmelCase : List[Any] = 'This is a test'
lowerCAmelCase : List[Any] = 'This is a test'
return input_text, output_text
def lowercase ( self ):
lowerCAmelCase : Tuple = '<s>'
lowerCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(snake_case__ ) , 2000 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def lowercase ( self ):
lowerCAmelCase : List[Any] = GPTSwaTokenizer(snake_case__ )
lowerCAmelCase : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [465, 287, 265, 631, 842] )
lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
snake_case__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(snake_case__ )
# fmt: off
self.assertListEqual(
snake_case__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def lowercase ( self ):
lowerCAmelCase : str = GPTSwaTokenizer(snake_case__ )
lowerCAmelCase : Optional[int] = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCAmelCase : Tuple = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(snake_case__ , snake_case__ ):
self.assertListEqual(tokenizer.encode_fast(snake_case__ ) , snake_case__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(snake_case__ , snake_case__ ):
self.assertEqual(tokenizer.decode_fast(snake_case__ ) , snake_case__ )
@slow
def lowercase ( self ):
lowerCAmelCase : str = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=snake_case__ , )
| 646
| 0
|
'''simple docstring'''
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_lowerCAmelCase : Dict = TypeVar('T')
class lowerCAmelCase ( Generic[T] ):
def __init__( self , snake_case__ = True ):
lowerCAmelCase : dict[T, list[T]] = {} # dictionary of lists
lowerCAmelCase : str = directed
def lowercase ( self , snake_case__ , snake_case__ ):
if not self.directed: # For undirected graphs
# if source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case__ )
self.adj_list[destination_vertex].append(snake_case__ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case__ )
lowerCAmelCase : Dict = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination
# vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(snake_case__ )
lowerCAmelCase : Optional[int] = [destination_vertex]
# if neither source vertex nor destination vertex is present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
lowerCAmelCase : Dict = [destination_vertex]
lowerCAmelCase : Tuple = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case__ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case__ )
lowerCAmelCase : Optional[int] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowerCAmelCase : Optional[Any] = [destination_vertex]
# if neither source vertex nor destination vertex is present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
lowerCAmelCase : Dict = [destination_vertex]
lowerCAmelCase : str = []
return self
def __repr__( self ):
return pformat(self.adj_list )
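# Intended usage (illustrative; assumes the de-obfuscated original, where the
# class is a GraphAdjacencyList and the method above is add_edge, returning
# `self` so that calls can chain):
#
#     g = GraphAdjacencyList(directed=False)
#     g.add_edge(1, 2).add_edge(2, 3)
#     print(g)  # {1: [2], 2: [1, 3], 3: [2]}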
| 710
|
'''simple docstring'''
def __UpperCamelCase ( _A : int ) -> bool:
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 646
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def __UpperCamelCase ( _A : Sequence[float] , _A : bool = False ) -> float:
"""simple docstring"""
if not arr:
return 0
lowerCAmelCase : Union[str, Any] = 0 if allow_empty_subarrays else float('-inf' )
lowerCAmelCase : Optional[int] = 0.0
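# Kadane's algorithm: either extend the running subarray with num or restart at num.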
for num in arr:
lowerCAmelCase : str = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowerCAmelCase : Dict = max(_A , _A )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCAmelCase : List[Any] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 711
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def __UpperCamelCase ( _A : str , _A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
lowerCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(_A )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def __UpperCamelCase ( _A : str , _A : DatasetInfo ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : str = str(_A )
dataset_info.write_to_directory(_A )
lowerCAmelCase : List[str] = DatasetInfo.from_directory(_A )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_A , 'dataset_info.json' ) )
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase : Tuple = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
lowerCAmelCase : Optional[int] = dataset_info._to_yaml_dict()
assert sorted(_A ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowerCAmelCase : Any = yaml.safe_dump(_A )
lowerCAmelCase : int = yaml.safe_load(_A )
assert dataset_info_yaml_dict == reloaded
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = DatasetInfo()
lowerCAmelCase : List[Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def __UpperCamelCase ( _A : Tuple , _A : DatasetInfosDict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : Tuple = str(_A )
dataset_infos_dict.write_to_directory(_A )
lowerCAmelCase : List[str] = DatasetInfosDict.from_directory(_A )
# the config_name of the dataset_infos_dict takes precedence over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowerCAmelCase : Tuple = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowerCAmelCase : Optional[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_A , 'README.md' ) )
| 646
| 0
|
import operator as op
_lowerCAmelCase : Any = 'scaler.pt'
_lowerCAmelCase : List[str] = 'pytorch_model'
_lowerCAmelCase : Union[str, Any] = 'random_states'
_lowerCAmelCase : Tuple = 'optimizer'
_lowerCAmelCase : List[str] = 'scheduler'
_lowerCAmelCase : Tuple = 'pytorch_model.bin'
_lowerCAmelCase : str = 'pytorch_model.bin.index.json'
_lowerCAmelCase : Union[str, Any] = 'model.safetensors'
_lowerCAmelCase : int = 'model.safetensors.index.json'
_lowerCAmelCase : List[str] = '1.10.2'
_lowerCAmelCase : int = 'py38'
_lowerCAmelCase : Optional[Any] = '4.17.0'
_lowerCAmelCase : List[str] = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
_lowerCAmelCase : str = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
_lowerCAmelCase : str = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
_lowerCAmelCase : Union[str, Any] = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
_lowerCAmelCase : Dict = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
_lowerCAmelCase : Any = '2.0.1'
_lowerCAmelCase : int = ['pdsh', 'standard', 'openmpi', 'mvapich']
_lowerCAmelCase : Dict = ['default', 'reduce-overhead', 'max-autotune']
_lowerCAmelCase : str = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_lowerCAmelCase : Optional[int] = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
_lowerCAmelCase : List[Any] = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
_lowerCAmelCase : List[Any] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 712
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase ( a ):
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=snake_case__ , speech_processor=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , feature_extractor=snake_case__ , )
def lowercase ( self , snake_case__ = "auto" ):
if slice_size == "auto":
lowerCAmelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case__ )
def lowercase ( self ):
self.enable_attention_slicing(snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__=1_6000 , snake_case__ = 512 , snake_case__ = 512 , snake_case__ = 50 , snake_case__ = 7.5 , snake_case__ = None , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , **snake_case__ , ):
lowerCAmelCase : List[str] = self.speech_processor.feature_extractor(
snake_case__ , return_tensors='pt' , sampling_rate=snake_case__ ).input_features.to(self.device )
lowerCAmelCase : Optional[Any] = self.speech_model.generate(snake_case__ , max_length=48_0000 )
lowerCAmelCase : str = self.speech_processor.tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , normalize=snake_case__ )[
0
]
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = 1
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = len(snake_case__ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case__ )}." )
# get prompt text embeddings
lowerCAmelCase : str = self.tokenizer(
snake_case__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCAmelCase : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCAmelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = text_embeddings.shape
lowerCAmelCase : Any = text_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case__ , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase : List[str]
if negative_prompt is None:
lowerCAmelCase : Any = [''] * batch_size
elif type(snake_case__ ) is not type(snake_case__ ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case__ )} !="
f" {type(snake_case__ )}." )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = [negative_prompt]
elif batch_size != len(snake_case__ ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case__ )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
lowerCAmelCase : Dict = negative_prompt
lowerCAmelCase : Optional[int] = text_input_ids.shape[-1]
lowerCAmelCase : int = self.tokenizer(
snake_case__ , padding='max_length' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='pt' , )
lowerCAmelCase : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase : List[Any] = uncond_embeddings.shape[1]
lowerCAmelCase : List[str] = uncond_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase : str = torch.randn(snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to(
self.device )
else:
lowerCAmelCase : Tuple = torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCAmelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
lowerCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase : Tuple = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase : Union[str, Any] = {}
if accepts_eta:
lowerCAmelCase : int = eta
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase : Tuple = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCAmelCase : List[str] = self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase , lowerCAmelCase : Dict = noise_pred.chunk(2 )
lowerCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase : int = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
lowerCAmelCase : Dict = self.vae.decode(snake_case__ ).sample
lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase : Dict = self.numpy_to_pil(snake_case__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
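# Illustrative end-to-end call (assumed; every component name below is a
# placeholder, and the pipeline class itself is obfuscated above):
#
#     pipe = SpeechToImagePipeline(speech_model=whisper, speech_processor=whisper_proc,
#                                  vae=vae, text_encoder=text_encoder, tokenizer=tokenizer,
#                                  unet=unet, scheduler=scheduler, safety_checker=None,
#                                  feature_extractor=feature_extractor)
#     out = pipe(raw_audio, 16_000)  # transcribe with Whisper, then diffuse from the text
#     image = out.images[0]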
| 646
| 0
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
_lowerCAmelCase : Any = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
_lowerCAmelCase : List[str] = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def __UpperCamelCase ( _A : List[str] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase : List[str] = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : Dict = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase : str = numpy_to_pil(_A )
return images
def __UpperCamelCase ( _A : Any ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
lowerCAmelCase : int = images[None, ...]
lowerCAmelCase : Union[str, Any] = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCAmelCase : List[str] = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
lowerCAmelCase : Any = [Image.fromarray(_A ) for image in images]
return pil_images
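# Intended usage (illustrative; assumes the de-obfuscated originals, where the
# first helper denormalizes a torch image batch and the second is numpy_to_pil):
#
#     import numpy as np
#     arr = np.random.rand(2, 64, 64, 3).astype("float32")  # NHWC floats in [0, 1]
#     pil_images = numpy_to_pil(arr)  # -> list of two PIL.Image.Image objects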
| 713
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : List[Any] = LDMTextToImagePipeline
_lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
_lowerCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Optional[int] = False
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCAmelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
lowerCAmelCase : str = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase : str = CLIPTextModel(snake_case__ )
lowerCAmelCase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
if str(snake_case__ ).startswith('mps' ):
lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Optional[Any] = self.get_dummy_components()
lowerCAmelCase : Optional[Any] = LDMTextToImagePipeline(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Tuple = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
lowerCAmelCase : List[Any] = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : int = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Tuple = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[Any] = self.get_inputs(snake_case__ )
lowerCAmelCase : List[Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase : Tuple = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
lowerCAmelCase : int = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : Any = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Optional[int] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = self.get_inputs(snake_case__ )
lowerCAmelCase : Optional[int] = pipe(**snake_case__ ).images[0]
lowerCAmelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 646
| 0
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def lowercase ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ = CHRF.CHAR_ORDER , snake_case__ = CHRF.WORD_ORDER , snake_case__ = CHRF.BETA , snake_case__ = False , snake_case__ = False , snake_case__ = False , ):
lowerCAmelCase : List[str] = len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
lowerCAmelCase : List[str] = [[refs[i] for refs in references] for i in range(snake_case__ )]
lowerCAmelCase : Union[str, Any] = CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Dict = sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
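# Minimal standalone sketch (illustrative only; mirrors the transposition in
# `_compute` above). The metric takes one reference sub-list per prediction,
# but sacrebleu's CHRF scores reference *streams* -- one stream per reference
# position -- so the sub-lists are transposed before scoring. Names prefixed
# `demo_` are hypothetical and not part of the metric.
if __name__ == "__main__":
    demo_references = [['ref a1', 'ref a2'], ['ref b1', 'ref b2']]  # one sub-list per prediction
    demo_refs_per_pred = len(demo_references[0])
    demo_transformed = [[refs[i] for refs in demo_references] for i in range(demo_refs_per_pred)]
    print(demo_transformed)  # [['ref a1', 'ref b1'], ['ref a2', 'ref b2']]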
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : int = """xmod"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=True , snake_case__=("en_XX",) , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : Optional[Any] = use_cache
lowerCAmelCase : Union[str, Any] = classifier_dropout
lowerCAmelCase : int = pre_norm
lowerCAmelCase : Optional[Any] = adapter_reduction_factor
lowerCAmelCase : Any = adapter_layer_norm
lowerCAmelCase : Dict = adapter_reuse_layer_norm
lowerCAmelCase : Any = ln_before_adapter
lowerCAmelCase : Optional[Any] = list(snake_case__ )
lowerCAmelCase : List[Any] = default_language
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
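# Illustrative result of the `inputs` property above for the default task
# (anything other than multiple-choice):
#   OrderedDict([('input_ids',      {0: 'batch', 1: 'sequence'}),
#                ('attention_mask', {0: 'batch', 1: 'sequence'})])
# The multiple-choice task inserts an extra 'choice' axis at position 1.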
'''simple docstring'''
from string import ascii_uppercase
_lowerCAmelCase : List[str] = {char: i for i, char in enumerate(ascii_uppercase)}
_lowerCAmelCase : List[Any] = dict(enumerate(ascii_uppercase))
def __UpperCamelCase ( _A : str , _A : str ) -> str:
"""simple docstring"""
lowerCAmelCase : Any = len(_A )
lowerCAmelCase : Tuple = 0
while True:
if x == i:
lowerCAmelCase : str = 0
if len(_A ) == len(_A ):
break
key += key[i]
i += 1
return key
def __UpperCamelCase ( _A : str , _A : str ) -> str:
"""simple docstring"""
lowerCAmelCase : str = ''
lowerCAmelCase : Any = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
lowerCAmelCase : List[str] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __UpperCamelCase ( _A : str , _A : str ) -> str:
"""simple docstring"""
lowerCAmelCase : Tuple = ''
lowerCAmelCase : Union[str, Any] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
lowerCAmelCase : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = 'THE GERMAN ATTACK'
lowerCAmelCase : Tuple = 'SECRET'
lowerCAmelCase : int = generate_key(_A , _A )
lowerCAmelCase : Union[str, Any] = cipher_text(_A , _A )
print(F"Encrypted Text = {s}" )
print(F"Original Text = {original_text(_A , _A )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
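# Worked example (hand-checked against the functions above; illustrative only):
# generating a key for message "THE GERMAN ATTACK" with key "SECRET" repeats
# the key up to the message length (17 chars, spaces included), giving
# 'SECRETSECRETSECRE'. Encryption then maps each letter m -> (m - k) % 26,
# copying spaces through unchanged, and decryption inverts it with
# (c + k) % 26 -- note this variant subtracts the key when encrypting, the
# reverse of the classical Vigenere convention.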
'''simple docstring'''
import argparse
import os
import re
_lowerCAmelCase : Dict = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_lowerCAmelCase : str = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_lowerCAmelCase : Any = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowerCAmelCase : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_lowerCAmelCase : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowerCAmelCase : Optional[Any] = re.compile(r'\[([^\]]+)\]')
def __UpperCamelCase ( _A : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Any = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def __UpperCamelCase ( _A : Dict , _A : Any="" , _A : List[str]=None , _A : Any=None ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
lowerCAmelCase : Optional[int] = ['\n'.join(lines[:index] )]
else:
lowerCAmelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Tuple = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
lowerCAmelCase : List[Any] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : int = []
else:
blocks.append('\n'.join(_A ) )
lowerCAmelCase : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def __UpperCamelCase ( _A : Dict ) -> List[Any]:
"""simple docstring"""
def _inner(_A : Tuple ):
return key(_A ).lower().replace('_' , '' )
return _inner
def __UpperCamelCase ( _A : Union[str, Any] , _A : Any=None ) -> Optional[Any]:
"""simple docstring"""
def noop(_A : Any ):
return x
if key is None:
lowerCAmelCase : List[str] = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : str = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[str] = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_A )[0].isupper()]
lowerCAmelCase : Tuple = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
def __UpperCamelCase ( _A : Union[str, Any] ) -> int:
"""simple docstring"""
def _replace(_A : List[Any] ):
lowerCAmelCase : List[Any] = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCAmelCase : Dict = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : List[str] = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(_A )] ) + "]"
lowerCAmelCase : Optional[int] = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(_A , key=lambda _A : x[1] )
lowerCAmelCase : Dict = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : int = keys[:-1]
lowerCAmelCase : Tuple = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def __UpperCamelCase ( _A : str , _A : Tuple=True ) -> Optional[Any]:
"""simple docstring"""
with open(_A , 'r' ) as f:
lowerCAmelCase : Optional[int] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[Any] = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : List[str] = main_blocks[block_idx]
lowerCAmelCase : Union[str, Any] = block.split('\n' )
# Get to the start of the imports.
lowerCAmelCase : Optional[Any] = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
lowerCAmelCase : str = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Union[str, Any] = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
lowerCAmelCase : List[Any] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = []
for i in range(len(_A ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : str = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(_A , 'w' ) as f:
f.write('\n'.join(_A ) )
def __UpperCamelCase ( _A : Tuple=True ) -> Any:
"""simple docstring"""
lowerCAmelCase : Tuple = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
lowerCAmelCase : Any = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(F"Would overwrite {len(_A )} files, run `make style`." )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_lowerCAmelCase : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
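# Illustrative ordering produced by `sort_objects` above (hypothetical names):
#   ["zeta_fn", "CONST_B", "Alpha", "_CONST_A", "beta_fn"]
# sorts to
#   ["_CONST_A", "CONST_B", "Alpha", "beta_fn", "zeta_fn"]
# -- constants (all-caps) first, then classes (leading capital), then
# functions (leading lowercase), each group sorted case-insensitively with
# underscores ignored.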
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
_lowerCAmelCase : int = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
_lowerCAmelCase : Any = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
_lowerCAmelCase : Any = '▁'
class lowerCAmelCase ( a ):
_lowerCamelCase : Any = VOCAB_FILES_NAMES
_lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : str = BigBirdTokenizer
_lowerCamelCase : Dict = ["""input_ids""", """attention_mask"""]
_lowerCamelCase : List[int] = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<unk>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="[SEP]" , snake_case__="[MASK]" , snake_case__="[CLS]" , **snake_case__ , ):
lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
lowerCAmelCase : List[str] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
lowerCAmelCase : Union[str, Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
lowerCAmelCase : Optional[int] = vocab_file
lowerCAmelCase : Union[str, Any] = False if not self.vocab_file else True
def lowercase ( self , snake_case__ , snake_case__ = None ):
lowerCAmelCase : Dict = [self.sep_token_id]
lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def lowercase ( self , snake_case__ , snake_case__ = None ):
lowerCAmelCase : List[Any] = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self , snake_case__ , snake_case__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase : str = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
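# Sequence layouts produced by the methods above (illustrative):
#   single sequence: [CLS] A [SEP]          token_type_ids: 0 ... 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  token_type_ids: 0 ... 0 over the
#   first segment and 1 ... 1 over the second.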
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=64 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Optional[Any] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Dict = use_input_mask
lowerCAmelCase : Tuple = use_token_type_ids
lowerCAmelCase : int = use_labels
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Optional[Any] = embedding_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[str] = type_sequence_label_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Any = num_labels
lowerCAmelCase : str = num_choices
lowerCAmelCase : int = scope
def lowercase ( self ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = MobileBertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[int] = model(snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = MobileBertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForNextSentencePrediction(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , next_sentence_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = self.num_labels
lowerCAmelCase : List[Any] = MobileBertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : int = MobileBertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = self.num_choices
lowerCAmelCase : Any = MobileBertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
        (
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
        ) : Optional[Any] = config_and_inputs
lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : List[str] = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Tuple = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : str = True
def lowercase ( self , snake_case__ , snake_case__ , snake_case__=False ):
lowerCAmelCase : int = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
lowerCAmelCase : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
lowerCAmelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def lowercase ( self ):
lowerCAmelCase : List[Any] = MobileBertModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
def __UpperCamelCase ( _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return torch.tensor(
_A , dtype=torch.long , device=_A , )
_lowerCAmelCase : Union[str, Any] = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : List[str] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(snake_case__ )
lowerCAmelCase : List[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase : Tuple = model(snake_case__ )[0]
lowerCAmelCase : List[Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , snake_case__ )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=snake_case__ , )
        # MobileBERT outputs span roughly 1e0 to 1e8. Even a tiny relative difference (~1e-8) on a
        # value of 1e8 yields an absolute difference of ~1, so an additive (absolute) tolerance is a
        # poor fit. Instead we divide the expected result by the actual result, expecting a ratio
        # near 1, and check that it stays within bounds: 1 - TOLERANCE < expected / result < 1 + TOLERANCE
lowerCAmelCase : List[str] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCAmelCase : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
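# Illustration of the relative-bound check above (hypothetical values, not part
# of the test): with a = torch.tensor([1e8, 1.0]) and b = a * (1 + 1e-4), the
# absolute difference (a - b).abs().max() is ~1e4 even though every element is
# within 0.01% of its target, while the ratio a / b stays inside
# [1 - TOLERANCE, 1 + TOLERANCE] for TOLERANCE = 1e-3.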
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : int = CycleDiffusionPipeline
_lowerCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
_lowerCamelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
_lowerCamelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCAmelCase : str = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
lowerCAmelCase : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase : Dict = CLIPTextModel(snake_case__ )
lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
lowerCAmelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase : Dict = image / 2 + 0.5
if str(snake_case__ ).startswith('mps' ):
lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : str = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase : Optional[int] = CycleDiffusionPipeline(**snake_case__ )
lowerCAmelCase : str = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Tuple = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Optional[Any] = pipe(**snake_case__ )
lowerCAmelCase : Optional[Any] = output.images
lowerCAmelCase : Optional[Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase : Optional[Any] = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowercase ( self ):
lowerCAmelCase : Any = self.get_dummy_components()
for name, module in components.items():
if hasattr(snake_case__ , 'half' ):
lowerCAmelCase : Optional[int] = module.half()
lowerCAmelCase : List[str] = CycleDiffusionPipeline(**snake_case__ )
lowerCAmelCase : List[str] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : List[str] = pipe(**snake_case__ )
lowerCAmelCase : List[Any] = output.images
lowerCAmelCase : Tuple = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase : Tuple = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowercase ( self ):
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def lowercase ( self ):
return super().test_inference_batch_single_identical()
@skip_mps
def lowercase ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowercase ( self ):
return super().test_save_load_optional_components()
@skip_mps
def lowercase ( self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self ):
lowerCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
lowerCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
lowerCAmelCase : List[Any] = init_image.resize((512, 512) )
lowerCAmelCase : List[str] = 'CompVis/stable-diffusion-v1-4'
lowerCAmelCase : List[Any] = DDIMScheduler.from_pretrained(snake_case__ , subfolder='scheduler' )
lowerCAmelCase : int = CycleDiffusionPipeline.from_pretrained(
snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCAmelCase : Optional[Any] = 'A black colored car'
lowerCAmelCase : Union[str, Any] = 'A blue colored car'
lowerCAmelCase : Dict = torch.manual_seed(0 )
lowerCAmelCase : Any = pipe(
prompt=snake_case__ , source_prompt=snake_case__ , image=snake_case__ , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case__ , output_type='np' , )
lowerCAmelCase : Dict = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def lowercase ( self ):
lowerCAmelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
lowerCAmelCase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
lowerCAmelCase : Union[str, Any] = init_image.resize((512, 512) )
lowerCAmelCase : str = 'CompVis/stable-diffusion-v1-4'
lowerCAmelCase : Optional[int] = DDIMScheduler.from_pretrained(snake_case__ , subfolder='scheduler' )
lowerCAmelCase : Optional[Any] = CycleDiffusionPipeline.from_pretrained(snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCAmelCase : Tuple = 'A black colored car'
lowerCAmelCase : List[Any] = 'A blue colored car'
lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase : Dict = pipe(
prompt=snake_case__ , source_prompt=snake_case__ , image=snake_case__ , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case__ , output_type='np' , )
lowerCAmelCase : Dict = output.images
assert np.abs(image - expected_image ).max() < 2e-2
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCamelCase ( _A : Dict ) -> int:
"""simple docstring"""
lowerCAmelCase : Tuple = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def __UpperCamelCase ( _A : List[Any] , _A : Dict ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def __UpperCamelCase ( _A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
lowerCAmelCase : List[Any] = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __UpperCamelCase ( _A : str , _A : Optional[Any] , _A : Dict , _A : str ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : List[str] = 'imagenet-1k-id2label.json'
lowerCAmelCase : Tuple = 10_00
lowerCAmelCase : str = 'huggingface/label-files'
lowerCAmelCase : List[Any] = num_labels
lowerCAmelCase : Any = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase : List[str] = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
lowerCAmelCase : int = CvtConfig(num_labels=_A , idalabel=_A , labelaid=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
lowerCAmelCase : List[str] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
lowerCAmelCase : Tuple = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowerCAmelCase : Any = [2, 2, 20]
lowerCAmelCase : List[str] = [3, 12, 16]
lowerCAmelCase : List[Any] = [1_92, 7_68, 10_24]
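    # Name -> config mapping applied above (illustrative): the characters at
    # positions 4:6 of the model name select the variant, e.g. "cvt-13" -> depth
    # [1, 2, 10], "cvt-21" -> depth [1, 4, 16], and anything else such as
    # "cvt-w24" -> depth [2, 2, 20] with heads [3, 12, 16] and embedding dims
    # [192, 768, 1024].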
lowerCAmelCase : Union[str, Any] = CvtForImageClassification(_A )
lowerCAmelCase : str = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : List[Any] = torch.load(_A , map_location=torch.device('cpu' ) )
lowerCAmelCase : str = OrderedDict()
lowerCAmelCase : int = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowerCAmelCase : List[str] = list_of_state_dict + cls_token(_A )
lowerCAmelCase : Optional[Any] = list_of_state_dict + embeddings(_A )
for cnt in range(config.depth[idx] ):
lowerCAmelCase : List[Any] = list_of_state_dict + attention(_A , _A )
lowerCAmelCase : List[str] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_A )
for i in range(len(_A ) ):
lowerCAmelCase : Tuple = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_lowerCAmelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 646
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : List[str] = KandinskyVaaImgaImgPipeline
_lowerCamelCase : int = ["""image_embeds""", """negative_image_embeds""", """image"""]
_lowerCamelCase : Any = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
_lowerCamelCase : Any = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowerCamelCase : Dict = False
@property
def lowercase ( self ):
return 32
@property
def lowercase ( self ):
return 32
@property
def lowercase ( self ):
return self.time_input_dim
@property
def lowercase ( self ):
return self.time_input_dim * 4
@property
def lowercase ( self ):
return 100
@property
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : List[Any] = {
'in_channels': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
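            # (here: 4 latent channels for the predicted mean and 4 for the predicted variance)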
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def lowercase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase ( self ):
lowerCAmelCase : Optional[int] = self.dummy_unet
lowerCAmelCase : Tuple = self.dummy_movq
lowerCAmelCase : List[Any] = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCAmelCase : List[Any] = DDIMScheduler(**snake_case__ )
lowerCAmelCase : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
lowerCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase : Dict = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) )
if str(snake_case__ ).startswith('mps' ):
lowerCAmelCase : Union[str, Any] = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : Union[str, Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : str = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = 'cpu'
lowerCAmelCase : Optional[int] = self.get_dummy_components()
lowerCAmelCase : Tuple = self.pipeline_class(**snake_case__ )
lowerCAmelCase : str = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
lowerCAmelCase : int = output.images
lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowerCAmelCase : str = image[0, -3:, -3:, -1]
lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Tuple = np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self ):
lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
lowerCAmelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCAmelCase : Optional[Any] = 'A red cartoon frog, 4k'
lowerCAmelCase : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowerCAmelCase : Any = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
lowerCAmelCase : Optional[int] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase : Optional[int] = pipeline(
image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 718
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class lowerCAmelCase ( a ):
_lowerCamelCase : List[str] = """xlm-roberta"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Optional[Any] = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : List[Any] = layer_norm_eps
lowerCAmelCase : Union[str, Any] = position_embedding_type
lowerCAmelCase : Union[str, Any] = use_cache
lowerCAmelCase : List[str] = classifier_dropout
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 646
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def __UpperCamelCase ( _A : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase : List[Any] = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
lowerCAmelCase : Dict = 10_24
lowerCAmelCase : List[Any] = 40_96
lowerCAmelCase : Optional[Any] = 24
lowerCAmelCase : Any = 16
lowerCAmelCase : Any = [5, 11, 17, 23]
lowerCAmelCase : Optional[int] = [2_56, 5_12, 10_24, 10_24]
lowerCAmelCase : Dict = (1, 3_84, 3_84)
if "nyu" or "midas" in checkpoint_url:
lowerCAmelCase : int = 7_68
lowerCAmelCase : Optional[int] = [1, 1, 1, 0.5]
lowerCAmelCase : Any = [2_56, 5_12, 7_68, 7_68]
lowerCAmelCase : Tuple = 1_50
lowerCAmelCase : List[str] = 16
lowerCAmelCase : Dict = (1, 3_84, 3_84)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Tuple = 'project'
if "ade" in checkpoint_url:
lowerCAmelCase : str = True
lowerCAmelCase : Tuple = 7_68
lowerCAmelCase : Optional[Any] = [1, 1, 1, 0.5]
lowerCAmelCase : Optional[int] = 1_50
lowerCAmelCase : Optional[Any] = 16
lowerCAmelCase : List[Any] = 'huggingface/label-files'
lowerCAmelCase : str = 'ade20k-id2label.json'
lowerCAmelCase : str = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase : Optional[Any] = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase : str = idalabel
lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
lowerCAmelCase : Optional[int] = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def __UpperCamelCase ( _A : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : List[str] = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(_A , _A )
def __UpperCamelCase ( _A : Optional[Any] ) -> str:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCAmelCase : List[str] = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
lowerCAmelCase : List[Any] = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
lowerCAmelCase : Tuple = name.replace('patch_embed' , '' )
if "pos_embed" in name:
lowerCAmelCase : Union[str, Any] = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
lowerCAmelCase : Optional[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
lowerCAmelCase : Dict = name.replace('proj' , 'projection' )
if "blocks" in name:
lowerCAmelCase : Any = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
lowerCAmelCase : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase : str = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
lowerCAmelCase : List[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
lowerCAmelCase : Any = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
lowerCAmelCase : Tuple = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
lowerCAmelCase : List[Any] = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
lowerCAmelCase : Dict = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
lowerCAmelCase : Any = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
lowerCAmelCase : int = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
lowerCAmelCase : Dict = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
lowerCAmelCase : Tuple = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCAmelCase : Any = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" )
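        # e.g. "refinenet4" becomes "fusion_stage.layers.0" and "refinenet1" becomes "fusion_stage.layers.3"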
if "out_conv" in name:
lowerCAmelCase : Any = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
lowerCAmelCase : Union[str, Any] = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
lowerCAmelCase : List[Any] = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
lowerCAmelCase : Tuple = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
lowerCAmelCase : Tuple = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCAmelCase : List[Any] = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCAmelCase : Optional[int] = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCAmelCase : List[str] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCAmelCase : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCAmelCase : Dict = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
lowerCAmelCase : Any = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
lowerCAmelCase : Dict = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
lowerCAmelCase : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
lowerCAmelCase : List[str] = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
lowerCAmelCase : Optional[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
lowerCAmelCase : Dict = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
lowerCAmelCase : List[Any] = name.replace('pretrained' , 'dpt' )
if "bn" in name:
lowerCAmelCase : Union[str, Any] = name.replace('bn' , 'batch_norm' )
if "head" in name:
lowerCAmelCase : Union[str, Any] = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
lowerCAmelCase : int = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
lowerCAmelCase : List[str] = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
lowerCAmelCase : List[Any] = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
lowerCAmelCase : List[Any] = name.replace('..' , '.' )
if "stem.conv" in name:
lowerCAmelCase : List[str] = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
lowerCAmelCase : Optional[int] = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
lowerCAmelCase : Union[str, Any] = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
lowerCAmelCase : str = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
lowerCAmelCase : Any = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
lowerCAmelCase : Union[str, Any] = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
lowerCAmelCase : Optional[int] = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def __UpperCamelCase ( _A : Dict , _A : str ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Union[str, Any] = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
lowerCAmelCase : Optional[Any] = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
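        # the fused matrix has shape (3 * hidden_size, hidden_size); rows [0:h], [h:2h] and [2h:3h]
        # hold the query, key and value projections respectively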
lowerCAmelCase : Dict = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : str = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase : Dict = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _A : Dict , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : List[str] = get_dpt_config(_A )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCAmelCase : str = torch.load(_A , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(_A )
# rename keys
for key in state_dict.copy().keys():
lowerCAmelCase : str = state_dict.pop(_A )
lowerCAmelCase : Union[str, Any] = val
# read in qkv matrices
read_in_q_k_v(_A , _A )
# load HuggingFace model
lowerCAmelCase : Any = DPTForSemanticSegmentation(_A ) if 'ade' in checkpoint_url else DPTForDepthEstimation(_A )
model.load_state_dict(_A )
model.eval()
# Check outputs on an image
lowerCAmelCase : List[str] = 4_80 if 'ade' in checkpoint_url else 3_84
lowerCAmelCase : Any = DPTImageProcessor(size=_A )
lowerCAmelCase : Tuple = prepare_img()
lowerCAmelCase : int = image_processor(_A , return_tensors='pt' )
# forward pass
lowerCAmelCase : Optional[Any] = model(**_A ).logits if 'ade' in checkpoint_url else model(**_A ).predicted_depth
if show_prediction:
lowerCAmelCase : Union[str, Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=_A , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
if pytorch_dump_folder_path is not None:
Path(_A ).mkdir(exist_ok=_A )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_A )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
_lowerCAmelCase : Any = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 719
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
        '--dataset_name' , type=_A , default='wikitext' , help='Name of the training dataset. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=_A , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=_A , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=_A , default=10_00 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=_A , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=_A , type=_A , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=_A , default=5_12 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=_A , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
lowerCAmelCase : Any = parser.parse_args()
return args
def __UpperCamelCase ( _A : Optional[int] ) -> int:
"""simple docstring"""
def fn(_A : Tuple ):
return tokenizer(examples['text'] )
return fn
def __UpperCamelCase ( _A : int ) -> int:
"""simple docstring"""
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_data['input_ids'] ) ):
lowerCAmelCase : Optional[Any] = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
lowerCAmelCase : Any = tf.train.Features(feature=_A )
lowerCAmelCase : List[str] = tf.train.Example(features=_A )
lowerCAmelCase : Tuple = example.SerializeToString()
records.append(_A )
return records
def __UpperCamelCase ( _A : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCAmelCase : Optional[Any] = min(len(_A ) , args.limit )
lowerCAmelCase : Dict = dataset.select(range(_A ) )
print(F"Limiting the dataset to {args.limit} entries." )
lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(_A ):
os.makedirs(_A )
else:
lowerCAmelCase : List[Any] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCAmelCase : Any = tokenize_function(_A )
lowerCAmelCase : Optional[int] = dataset.map(_A , batched=_A , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
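    # For example, with max_length=4 the batch {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}
    # is concatenated to [1, 2, 3, 4, 5, 6, 7, 8] and re-split into [[1, 2, 3, 4], [5, 6, 7, 8]].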
def group_texts(_A : str ):
# Concatenate all texts.
lowerCAmelCase : Optional[int] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCAmelCase : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCAmelCase : List[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowerCAmelCase : str = {
k: [t[i : i + args.max_length] for i in range(0 , _A , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowerCAmelCase : List[Any] = dataset_tokenized.map(_A , batched=_A , batch_size=10_00 , num_proc=4 )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = 0
for shard in range(0 , len(_A ) , args.shard_size ):
lowerCAmelCase : Optional[Any] = grouped_dataset[shard : shard + args.shard_size]
lowerCAmelCase : List[str] = len(dataset_snapshot['input_ids'] )
lowerCAmelCase : Union[str, Any] = os.path.join(_A , F"dataset-{shard_count}-{records_containing}.tfrecord" )
lowerCAmelCase : List[Any] = get_serialized_examples(_A )
with tf.io.TFRecordWriter(_A ) as out_file:
for i in range(len(_A ) ):
lowerCAmelCase : Union[str, Any] = serialized_examples[i]
out_file.write(_A )
print('Wrote file {} containing {} records'.format(_A , _A ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , 'w' ) as f:
print(F"Total {args.split} records: {total_records}" , file=_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = parse_args()
main(args)
| 646
| 0
|
'''simple docstring'''
def __UpperCamelCase ( _A : int ) -> bool:
"""simple docstring"""
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : List[str] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
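# Example: 121 reversed is 121, so the check returns True; 123 reversed is 321, so it returns False.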
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase ( _A : Any , _A : Dict , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
hf_model.apply_weight_norm()
lowerCAmelCase : int = checkpoint['input_conv.weight_g']
lowerCAmelCase : Optional[int] = checkpoint['input_conv.weight_v']
lowerCAmelCase : Dict = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase : Optional[Any] = checkpoint[F"upsamples.{i}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.weight_v"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
lowerCAmelCase : Optional[Any] = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
lowerCAmelCase : List[Any] = checkpoint['output_conv.1.weight_g']
lowerCAmelCase : List[str] = checkpoint['output_conv.1.weight_v']
lowerCAmelCase : Optional[Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCamelCase ( _A : Dict , _A : Union[str, Any] , _A : List[Any] , _A : Any=None , _A : Any=None , ) -> Dict:
"""simple docstring"""
if config_path is not None:
lowerCAmelCase : Dict = SpeechTaHifiGanConfig.from_pretrained(_A )
else:
lowerCAmelCase : Union[str, Any] = SpeechTaHifiGanConfig()
lowerCAmelCase : List[Any] = SpeechTaHifiGan(_A )
lowerCAmelCase : List[str] = torch.load(_A )
load_weights(orig_checkpoint['model']['generator'] , _A , _A )
lowerCAmelCase : Tuple = np.load(_A )
lowerCAmelCase : List[Any] = stats[0].reshape(-1 )
lowerCAmelCase : int = stats[1].reshape(-1 )
lowerCAmelCase : Union[str, Any] = torch.from_numpy(_A ).float()
lowerCAmelCase : int = torch.from_numpy(_A ).float()
model.save_pretrained(_A )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 646
| 0
|
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase ( a ):
_lowerCamelCase : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case__="</s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__=125 , snake_case__=None , **snake_case__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowerCAmelCase : int = [f"<extra_id_{i}>" for i in range(snake_case__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowerCAmelCase : Tuple = len(set(filter(lambda snake_case__ : bool('extra_id' in str(snake_case__ ) ) , snake_case__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
lowerCAmelCase : int = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
lowerCAmelCase : int = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
super().__init__(
eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , extra_ids=snake_case__ , additional_special_tokens=snake_case__ , **snake_case__ , )
lowerCAmelCase : Dict = extra_ids
lowerCAmelCase : Any = 2**8 # utf is 8 bits
# define special tokens dict
        lowerCAmelCase : Dict[str, int] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
lowerCAmelCase : Any = len(self.special_tokens_encoder )
lowerCAmelCase : int = len(snake_case__ )
for i, token in enumerate(snake_case__ ):
lowerCAmelCase : int = self.vocab_size + i - n
        lowerCAmelCase : Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def lowercase ( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def lowercase ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
# normal case: some special tokens
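        # the returned mask marks regular tokens with 0 and each appended EOS with 1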
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def lowercase ( self , snake_case__ ):
if len(snake_case__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowercase ( self , snake_case__ , snake_case__ = None ):
lowerCAmelCase : List[str] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase ( self , snake_case__ , snake_case__ = None ):
lowerCAmelCase : Dict = self._add_eos_if_not_present(snake_case__ )
if token_ids_a is None:
return token_ids_a
else:
lowerCAmelCase : int = self._add_eos_if_not_present(snake_case__ )
return token_ids_a + token_ids_a
def lowercase ( self , snake_case__ ):
lowerCAmelCase : Union[str, Any] = [chr(snake_case__ ) for i in text.encode('utf-8' )]
return tokens
def lowercase ( self , snake_case__ ):
if token in self.special_tokens_encoder:
lowerCAmelCase : int = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
lowerCAmelCase : int = self.added_tokens_encoder[token]
elif len(snake_case__ ) != 1:
lowerCAmelCase : Union[str, Any] = self.unk_token_id
else:
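            # ordinary tokens are single characters; their id is the byte value offset by the number of special tokens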
lowerCAmelCase : Any = ord(snake_case__ ) + self._num_special_tokens
return token_id
def lowercase ( self , snake_case__ ):
if index in self.special_tokens_decoder:
lowerCAmelCase : Any = self.special_tokens_decoder[index]
else:
lowerCAmelCase : int = chr(index - self._num_special_tokens )
return token
def lowercase ( self , snake_case__ ):
lowerCAmelCase : Dict = B''
for token in tokens:
if token in self.special_tokens_decoder:
lowerCAmelCase : Dict = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
lowerCAmelCase : Any = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
lowerCAmelCase : Tuple = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
lowerCAmelCase : List[str] = token.encode('utf-8' )
else:
lowerCAmelCase : Dict = bytes([ord(snake_case__ )] )
bstring += tok_string
lowerCAmelCase : Any = bstring.decode('utf-8' , errors='ignore' )
return string
def lowercase ( self , snake_case__ , snake_case__ = None ):
return ()
| 721
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def lowercase ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ = CHRF.CHAR_ORDER , snake_case__ = CHRF.WORD_ORDER , snake_case__ = CHRF.BETA , snake_case__ = False , snake_case__ = False , snake_case__ = False , ):
lowerCAmelCase : List[str] = len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
lowerCAmelCase : List[str] = [[refs[i] for refs in references] for i in range(snake_case__ )]
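        # transpose the references from one list per prediction to one list per reference position,
        # as sacrebleu's corpus_score expects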
lowerCAmelCase : Union[str, Any] = CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Dict = sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 646
| 0
|
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __UpperCamelCase ( _A : Any , _A : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : Optional[Any] = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase : str = model_name.find('patch' )
lowerCAmelCase : Tuple = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
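    # e.g. "xclip-base-patch32" yields a patch size of 32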
lowerCAmelCase : List[Any] = XCLIPVisionConfig(patch_size=_A , num_frames=_A )
if "large" in model_name:
lowerCAmelCase : List[Any] = 7_68
lowerCAmelCase : List[str] = 30_72
lowerCAmelCase : Dict = 12
lowerCAmelCase : Tuple = 10_24
lowerCAmelCase : Any = 40_96
lowerCAmelCase : str = 16
lowerCAmelCase : Union[str, Any] = 24
lowerCAmelCase : Union[str, Any] = 7_68
lowerCAmelCase : Optional[Any] = 30_72
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase : Optional[Any] = 3_36
lowerCAmelCase : Tuple = XCLIPConfig.from_text_vision_configs(_A , _A )
if "large" in model_name:
lowerCAmelCase : int = 7_68
return config
def __UpperCamelCase ( _A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if name == "token_embedding.weight":
lowerCAmelCase : Optional[Any] = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase : Dict = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase : Optional[Any] = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase : List[Any] = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase : Dict = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase : Dict = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase : Tuple = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase : Optional[Any] = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase : List[Any] = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase : int = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase : Union[str, Any] = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase : int = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase : str = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase : List[Any] = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase : List[str] = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase : int = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase : List[Any] = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase : str = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase : str = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase : str = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase : str = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase : List[str] = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def __UpperCamelCase ( _A : Optional[int] , _A : Any ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCAmelCase : List[str] = orig_state_dict.pop(_A )
if "attn.in_proj" in key:
lowerCAmelCase : Optional[int] = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase : str = key_split[3]
lowerCAmelCase : int = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase : Optional[Any] = val[
:dim, :
]
lowerCAmelCase : Dict = val[
dim : dim * 2, :
]
lowerCAmelCase : int = val[
-dim:, :
]
else:
lowerCAmelCase : int = val[
:dim
]
lowerCAmelCase : Union[str, Any] = val[
dim : dim * 2
]
lowerCAmelCase : Optional[int] = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase : Any = val[
:dim, :
]
lowerCAmelCase : List[str] = val[
dim : dim * 2, :
]
lowerCAmelCase : str = val[
-dim:, :
]
else:
lowerCAmelCase : int = val[:dim]
lowerCAmelCase : List[Any] = val[
dim : dim * 2
]
lowerCAmelCase : str = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase : List[Any] = key_split[2]
lowerCAmelCase : Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase : List[str] = val[:dim, :]
lowerCAmelCase : str = val[dim : dim * 2, :]
lowerCAmelCase : Tuple = val[-dim:, :]
else:
lowerCAmelCase : Optional[int] = val[:dim]
lowerCAmelCase : str = val[dim : dim * 2]
lowerCAmelCase : int = val[-dim:]
else:
lowerCAmelCase : str = key_split[2]
lowerCAmelCase : str = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase : Union[str, Any] = val[:dim, :]
lowerCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
lowerCAmelCase : Tuple = val[-dim:, :]
else:
lowerCAmelCase : Union[str, Any] = val[:dim]
lowerCAmelCase : Dict = val[
dim : dim * 2
]
lowerCAmelCase : Optional[int] = val[-dim:]
else:
lowerCAmelCase : int = rename_key(_A )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase : Dict = val.T
lowerCAmelCase : Optional[Any] = val
return orig_state_dict
def __UpperCamelCase ( _A : Any ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
lowerCAmelCase : Dict = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
lowerCAmelCase : Union[str, Any] = 'eating_spaghetti.npy'
elif num_frames == 32:
lowerCAmelCase : Optional[int] = 'eating_spaghetti_32_frames.npy'
lowerCAmelCase : List[Any] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=_A , repo_type='dataset' , )
lowerCAmelCase : Any = np.load(_A )
return list(_A )
def __UpperCamelCase ( _A : Optional[Any] , _A : Union[str, Any]=None , _A : Dict=False ) -> List[str]:
"""simple docstring"""
lowerCAmelCase : int = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase : Any = model_to_url[model_name]
lowerCAmelCase : Optional[Any] = 8
if "16-frames" in model_name:
lowerCAmelCase : Union[str, Any] = 16
elif "shot" in model_name:
lowerCAmelCase : Optional[Any] = 32
lowerCAmelCase : Dict = get_xclip_config(_A , _A )
lowerCAmelCase : Tuple = XCLIPModel(_A )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = 'pytorch_model.bin'
gdown.cached_download(_A , _A , quiet=_A )
lowerCAmelCase : Optional[Any] = torch.load(_A , map_location='cpu' )['model']
else:
lowerCAmelCase : List[str] = torch.hub.load_state_dict_from_url(_A )['model']
lowerCAmelCase : Optional[int] = convert_state_dict(_A , _A )
lowerCAmelCase : int = XCLIPModel(_A )
lowerCAmelCase : Union[str, Any] = model.load_state_dict(_A , strict=_A )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase : int = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
lowerCAmelCase : Dict = VideoMAEImageProcessor(size=_A )
lowerCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase : Union[str, Any] = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase : Optional[int] = XCLIPProcessor(image_processor=_A , tokenizer=_A )
lowerCAmelCase : Dict = prepare_video(_A )
lowerCAmelCase : List[str] = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=_A , return_tensors='pt' , padding=_A )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase : Any = model(**_A )
# Verify outputs
lowerCAmelCase : str = outputs.logits_per_video
lowerCAmelCase : List[str] = logits_per_video.softmax(dim=1 )
print('Probs:' , _A )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase : Any = torch.tensor([[0.00_19, 0.99_51, 0.00_30]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase : str = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase : Union[str, Any] = torch.tensor([[0.00_83, 0.96_81, 0.02_36]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase : Optional[Any] = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase : List[str] = torch.tensor([[0.00_62, 0.98_64, 0.00_75]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase : str = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase : Optional[Any] = torch.tensor([[0.05_55, 0.89_14, 0.05_31]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase : str = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase : str = torch.tensor([[0.00_36, 0.99_20, 0.00_45]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase : Tuple = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase : List[str] = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase : Optional[int] = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase : List[Any] = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase : Union[str, Any] = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase : Optional[Any] = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase : Optional[Any] = torch.tensor([[0.00_27, 0.99_04, 0.00_70]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase : Any = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase : List[str] = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(_A , _A , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(_A , organization='nielsr' )
processor.push_to_hub(_A , organization='nielsr' )
slow_tokenizer.push_to_hub(_A , organization='nielsr' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowerCAmelCase : Dict = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
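# Hedged usage sketch (not part of the source; the script name and output path
# below are illustrative, not confirmed by the repository):
#   python convert_x_clip_checkpoint.py --model_name xclip-base-patch16 \
#       --pytorch_dump_folder_path ./xclip-base-patch16 --push_to_hub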
| 700
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Union[str, Any] = """open-llama"""
def __init__( self , snake_case__=10_0000 , snake_case__=4096 , snake_case__=1_1008 , snake_case__=32 , snake_case__=32 , snake_case__="silu" , snake_case__=2048 , snake_case__=0.0_2 , snake_case__=1e-6 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=0.1 , snake_case__=0.1 , snake_case__=True , snake_case__=True , snake_case__=None , **snake_case__ , ):
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Dict = kwargs.pop(
'use_memorry_efficient_attention' , snake_case__ )
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_dropout_prob
lowerCAmelCase : Union[str, Any] = use_stable_embedding
lowerCAmelCase : Tuple = shared_input_output_embedding
lowerCAmelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
def lowercase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f"got {self.rope_scaling}" )
lowerCAmelCase : List[Any] = self.rope_scaling.get('type' , snake_case__ )
lowerCAmelCase : List[str] = self.rope_scaling.get('factor' , snake_case__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(snake_case__ , snake_case__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 646
| 0
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowerCAmelCase : List[str] = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
_lowerCAmelCase : Optional[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def __UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : List[Any] = calculate_rouge(_A , _A , bootstrap_aggregation=_A , rouge_keys=['rouge2', 'rougeL'] )
assert isinstance(_A , _A )
lowerCAmelCase : Tuple = calculate_rouge(_A , _A , bootstrap_aggregation=_A , rouge_keys=['rouge2'] )
assert (
pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
)
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : Optional[Any] = 'rougeLsum'
lowerCAmelCase : Union[str, Any] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=[k] )[k]
lowerCAmelCase : Tuple = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=[k] )[k]
assert score > score_no_sep
def __UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : List[str] = ['rouge1', 'rouge2', 'rougeL']
lowerCAmelCase : Optional[int] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=_A )
lowerCAmelCase : Optional[int] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=_A )
assert score_sep == score_no_sep
def __UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
lowerCAmelCase : Union[str, Any] = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
assert calculate_rouge(_A , _A , newline_sep=_A ) == calculate_rouge(_A , _A , newline_sep=_A )
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase : List[Any] = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
lowerCAmelCase : Union[str, Any] = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
lowerCAmelCase : Any = calculate_rouge(_A , _A , rouge_keys=['rougeLsum'] , newline_sep=_A )['rougeLsum']
lowerCAmelCase : Any = calculate_rouge(_A , _A , rouge_keys=['rougeLsum'] )['rougeLsum']
assert new_score > prev_score
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase : int = Path('examples/seq2seq/test_data/wmt_en_ro' )
lowerCAmelCase : Union[str, Any] = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
assert isinstance(_A , _A )
lowerCAmelCase : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=_A )
assert isinstance(_A , _A )
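# Hedged sketch of the helper exercised above. PRED and TGT stand for the two
# example lists defined at the top of this module (their pre-obfuscation names).
#   agg = calculate_rouge(PRED, TGT, rouge_keys=['rouge2'])  # aggregated dict of scores
#   raw = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['rouge2'])
#   pd.DataFrame(raw['rouge2']).fmeasure.mean()  # per-example F-measures, as asserted above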
| 701
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCAmelCase ( a ):
_lowerCamelCase : Any = """deformable_detr"""
_lowerCamelCase : List[str] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , snake_case__=True , snake_case__=None , snake_case__=3 , snake_case__=300 , snake_case__=1024 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=6 , snake_case__=1024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0_2 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__="resnet50" , snake_case__=True , snake_case__=False , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=False , snake_case__=300 , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.2_5 , snake_case__=False , **snake_case__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = backbone_config.get('model_type' )
lowerCAmelCase : str = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[Any] = config_class.from_dict(snake_case__ )
lowerCAmelCase : Union[str, Any] = use_timm_backbone
lowerCAmelCase : List[Any] = backbone_config
lowerCAmelCase : Any = num_channels
lowerCAmelCase : Tuple = num_queries
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : int = d_model
lowerCAmelCase : List[str] = encoder_ffn_dim
lowerCAmelCase : List[str] = encoder_layers
lowerCAmelCase : int = encoder_attention_heads
lowerCAmelCase : str = decoder_ffn_dim
lowerCAmelCase : str = decoder_layers
lowerCAmelCase : Dict = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : List[str] = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Any = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = auxiliary_loss
lowerCAmelCase : Optional[Any] = position_embedding_type
lowerCAmelCase : List[str] = backbone
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : int = dilation
# deformable attributes
lowerCAmelCase : List[str] = num_feature_levels
lowerCAmelCase : List[str] = encoder_n_points
lowerCAmelCase : Union[str, Any] = decoder_n_points
lowerCAmelCase : Tuple = two_stage
lowerCAmelCase : Dict = two_stage_num_proposals
lowerCAmelCase : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowerCAmelCase : Union[str, Any] = class_cost
lowerCAmelCase : Dict = bbox_cost
lowerCAmelCase : List[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : Dict = mask_loss_coefficient
lowerCAmelCase : Any = dice_loss_coefficient
lowerCAmelCase : str = bbox_loss_coefficient
lowerCAmelCase : Tuple = giou_loss_coefficient
lowerCAmelCase : List[str] = eos_coefficient
lowerCAmelCase : Any = focal_alpha
lowerCAmelCase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase ( self ):
return self.encoder_attention_heads
@property
def lowercase ( self ):
return self.d_model
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
lowerCAmelCase : str = self.__class__.model_type
return output
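# Hedged sketch (the public name DeformableDetrConfig is assumed from the model type):
#   cfg = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   cfg.num_attention_heads == cfg.encoder_attention_heads  # True, via attribute_map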
| 646
| 0
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def __UpperCamelCase ( _A : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCamelCase ( ) -> Iterator[int]:
"""simple docstring"""
lowerCAmelCase : List[str] = 2
while True:
if is_prime(_A ):
yield num
num += 1
def __UpperCamelCase ( _A : int = 2_00_00_00 ) -> int:
"""simple docstring"""
return sum(takewhile(lambda _A : x < n , prime_generator() ) )
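# Illustration (well-known values, stated for reference):
#   prime_generator() yields 2, 3, 5, 7, 11, ...
#   solution(10) == 2 + 3 + 5 + 7 == 17
#   solution() for the default bound of 2_000_000 is 142913828922 (Project Euler 10).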
if __name__ == "__main__":
print(f"""{solution() = }""")
| 702
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : str = PegasusTokenizer
_lowerCamelCase : Union[str, Any] = PegasusTokenizerFast
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = True
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : List[Any] = PegasusTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Optional[int] = '</s>'
lowerCAmelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(snake_case__ ) , 1103 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowerCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self._large_tokenizer
# <mask_1> masks a whole sentence while <mask_2> masks a single word
lowerCAmelCase : List[str] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowerCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase : List[Any] = 'To ensure a smooth flow of bank resolutions.'
lowerCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase : Any = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = ['This is going to be way too long.' * 150, 'short example']
lowerCAmelCase : int = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : Dict = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : Dict = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self ):
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Optional[Any] = PegasusTokenizer
_lowerCamelCase : str = PegasusTokenizerFast
_lowerCamelCase : Tuple = True
_lowerCamelCase : int = True
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : int = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self ):
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def lowercase ( self , **snake_case__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase ( self , snake_case__ ):
return ("This is a test", "This is a test")
def lowercase ( self ):
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase : List[str] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowerCAmelCase : Dict = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
lowerCAmelCase : Union[str, Any] = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0]
self.assertListEqual(snake_case__ , snake_case__ )
@require_torch
def lowercase ( self ):
lowerCAmelCase : Optional[int] = ['This is going to be way too long.' * 1000, 'short example']
lowerCAmelCase : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase : List[str] = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
lowerCAmelCase : List[str] = self._large_tokenizer(
text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case__ ) == 2 # input_ids, attention_mask.
def lowercase ( self ):
lowerCAmelCase : List[str] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowerCAmelCase : Tuple = self._large_tokenizer(snake_case__ ).input_ids
self.assertListEqual(
snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
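# Hedged sketch of the tokenization pattern these tests exercise (mirrors the
# calls above; the checkpoint name comes from _large_tokenizer):
#   tok = PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')
#   batch = tok(['source document'], padding=True, truncation=True, return_tensors='pt')
#   targets = tok(text_target=['target summary'], max_length=5, truncation=True, return_tensors='pt')
#   batch.input_ids, targets['input_ids']  # encoder inputs and target ids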
| 646
| 0
|
'''simple docstring'''
def __UpperCamelCase ( _A : str ) -> bool:
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
lowerCAmelCase : List[Any] = sorted(string.lower() )
return len(_A ) == len(set(_A ) )
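# Illustrative calls:
#   is_isogram('Uncopyrightable')  # True  - no letter repeats
#   is_isogram('allowance')  # False - 'a' and 'l' repeat
#   is_isogram('abc1')  # raises ValueError (non-alphabetic character)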
if __name__ == "__main__":
_lowerCAmelCase : Tuple = input('Enter a string ').strip()
_lowerCAmelCase : Dict = is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 703
|
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCamelCase ( _A : np.ndarray , _A : float ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = math.sqrt(_A )
lowerCAmelCase : Union[str, Any] = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCamelCase ( _A : np.ndarray , _A : int , _A : int , _A : int ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : int = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCamelCase ( _A : int , _A : float ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : Dict = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _A ):
for j in range(0 , _A ):
lowerCAmelCase : Optional[int] = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_A , _A )
def __UpperCamelCase ( _A : np.ndarray , _A : float , _A : float , _A : int , ) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase : str = np.zeros(img.shape )
lowerCAmelCase : int = get_gauss_kernel(_A , _A )
lowerCAmelCase , lowerCAmelCase : Dict = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
lowerCAmelCase : int = get_slice(_A , _A , _A , _A )
lowerCAmelCase : Any = img_s - img_s[kernel_size // 2, kernel_size // 2]
lowerCAmelCase : str = vec_gaussian(_A , _A )
lowerCAmelCase : Optional[int] = np.multiply(_A , _A )
lowerCAmelCase : str = np.multiply(_A , _A )
lowerCAmelCase : Union[str, Any] = np.sum(_A ) / np.sum(_A )
lowerCAmelCase : Tuple = val
return imga
def __UpperCamelCase ( _A : list ) -> tuple:
"""simple docstring"""
lowerCAmelCase : List[Any] = args[1] if args[1:] else '../image_data/lena.jpg'
lowerCAmelCase : Any = float(args[2] ) if args[2:] else 1.0
lowerCAmelCase : Union[str, Any] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
lowerCAmelCase : int = int(args[4] )
lowerCAmelCase : Optional[Any] = kernel_size + abs(kernel_size % 2 - 1 )
else:
lowerCAmelCase : Optional[int] = 5
return filename, spatial_variance, intensity_variance, kernel_size
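# Hypothetical invocation (argument order follows parse_args above; the script
# name and image path are illustrative):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5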
if __name__ == "__main__":
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = parse_args(sys.argv)
_lowerCAmelCase : str = cva.imread(filename, 0)
cva.imshow('input image', img)
_lowerCAmelCase : Union[str, Any] = img / 255
_lowerCAmelCase : List[str] = out.astype('float32')
_lowerCAmelCase : Optional[int] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
_lowerCAmelCase : Union[str, Any] = out * 255
_lowerCAmelCase : Optional[Any] = np.uinta(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 646
| 0
|
'''simple docstring'''
import argparse
import os
import re
_lowerCAmelCase : Dict = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_lowerCAmelCase : str = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_lowerCAmelCase : Any = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowerCAmelCase : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_lowerCAmelCase : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowerCAmelCase : Optional[Any] = re.compile(r'\[([^\]]+)\]')
def __UpperCamelCase ( _A : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Any = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def __UpperCamelCase ( _A : Dict , _A : Any="" , _A : List[str]=None , _A : Any=None ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
lowerCAmelCase : Optional[int] = ['\n'.join(lines[:index] )]
else:
lowerCAmelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Tuple = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
lowerCAmelCase : List[Any] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : int = []
else:
blocks.append('\n'.join(_A ) )
lowerCAmelCase : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def __UpperCamelCase ( _A : Dict ) -> List[Any]:
"""simple docstring"""
def _inner(_A : Tuple ):
return key(_A ).lower().replace('_' , '' )
return _inner
def __UpperCamelCase ( _A : Union[str, Any] , _A : Any=None ) -> Optional[Any]:
"""simple docstring"""
def noop(_A : Any ):
return x
if key is None:
lowerCAmelCase : List[str] = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : str = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[str] = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_A )[0].isupper()]
lowerCAmelCase : Tuple = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
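# Illustrative ordering (constants first, then classes, then functions, each
# sorted case-insensitively with underscores ignored):
#   sort_objects(['zeta_fn', 'CONST_B', 'AlphaClass'])  ->  ['CONST_B', 'AlphaClass', 'zeta_fn']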
def __UpperCamelCase ( _A : Union[str, Any] ) -> int:
"""simple docstring"""
def _replace(_A : List[Any] ):
lowerCAmelCase : List[Any] = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCAmelCase : Dict = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : List[str] = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(_A )] ) + "]"
lowerCAmelCase : Optional[int] = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(_A , key=lambda _A : x[1] )
lowerCAmelCase : Dict = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : int = keys[:-1]
lowerCAmelCase : Tuple = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def __UpperCamelCase ( _A : str , _A : Tuple=True ) -> Optional[Any]:
"""simple docstring"""
with open(_A , 'r' ) as f:
lowerCAmelCase : Optional[int] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[Any] = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains any `_import_structure` entries to sort.
lowerCAmelCase : List[str] = main_blocks[block_idx]
lowerCAmelCase : Union[str, Any] = block.split('\n' )
# Get to the start of the imports.
lowerCAmelCase : Optional[Any] = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
lowerCAmelCase : str = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Union[str, Any] = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
lowerCAmelCase : List[Any] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = []
for i in range(len(_A ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : str = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(_A , 'w' ) as f:
f.write('\n'.join(_A ) )
def __UpperCamelCase ( _A : Tuple=True ) -> Any:
"""simple docstring"""
lowerCAmelCase : Tuple = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
lowerCAmelCase : Any = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(F"Would overwrite {len(_A )} files, run `make style`." )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_lowerCAmelCase : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
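# Hedged usage (the path under utils/ is an assumption about where this script lives):
#   python utils/custom_init_isort.py --check_only   # raises if any __init__.py needs sorting
#   python utils/custom_init_isort.py                # rewrites offending __init__.py files in place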
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase : int = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 646
| 0
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=64 , snake_case__=5 , snake_case__=4 , snake_case__=64 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Any = batch_size
lowerCAmelCase : Dict = seq_length
lowerCAmelCase : List[Any] = is_training
lowerCAmelCase : List[str] = use_input_mask
lowerCAmelCase : Dict = use_token_type_ids
lowerCAmelCase : Tuple = use_labels
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : List[str] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : int = max_position_embeddings
lowerCAmelCase : Dict = type_vocab_size
lowerCAmelCase : List[str] = type_sequence_label_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Tuple = num_labels
lowerCAmelCase : List[Any] = num_choices
lowerCAmelCase : List[str] = scope
def lowercase ( self ):
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def lowercase ( self ):
lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Dict = None
if self.use_input_mask:
lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : str = None
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MPNetModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = MPNetForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[Any] = model(
snake_case__ , attention_mask=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Any = MPNetForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[Any] = self.num_choices
lowerCAmelCase : Optional[int] = MPNetForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Tuple = model(
snake_case__ , attention_mask=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Tuple = self.num_labels
lowerCAmelCase : Optional[int] = MPNetForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[Any] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self ):
lowerCAmelCase : str = self.prepare_config_and_inputs()
(lowerCAmelCase) : Optional[int] = config_and_inputs
lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : List[str] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Dict = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Union[str, Any] = True
def lowercase ( self ):
lowerCAmelCase : Any = MPNetModelTester(self )
lowerCAmelCase : int = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case__ )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = MPNetModel.from_pretrained('microsoft/mpnet-base' )
lowerCAmelCase : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCAmelCase : Tuple = model(snake_case__ )[0]
lowerCAmelCase : Optional[int] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case__ )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
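# Hedged restatement of the integration check as a standalone snippet (model id
# and input ids from the test; hidden size 768 follows from the asserted shape):
#   model = MPNetModel.from_pretrained('microsoft/mpnet-base')
#   out = model(torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]))[0]
#   out.shape  # torch.Size([1, 11, 768])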
| 705
|
'''simple docstring'''
from typing import Any
class lowerCAmelCase :
def __init__( self , snake_case__ ):
lowerCAmelCase : Optional[int] = data
lowerCAmelCase : Optional[Any] = None
def __repr__( self ):
return f"Node({self.data})"
class lowerCAmelCase :
def __init__( self ):
lowerCAmelCase : Dict = None
def __iter__( self ):
lowerCAmelCase : Optional[Any] = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
lowerCAmelCase : Any = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : List[str] = current.next
lowerCAmelCase : int = data
def lowercase ( self , snake_case__ ):
self.insert_nth(len(self ) , snake_case__ )
def lowercase ( self , snake_case__ ):
self.insert_nth(0 , snake_case__ )
def lowercase ( self , snake_case__ , snake_case__ ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
lowerCAmelCase : List[str] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : int = new_node
elif index == 0:
lowerCAmelCase : List[Any] = self.head # link new_node to head
lowerCAmelCase : List[Any] = new_node
else:
lowerCAmelCase : List[Any] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Any = temp.next
lowerCAmelCase : str = new_node
def lowercase ( self ): # print every node data
print(self )
def lowercase ( self ):
return self.delete_nth(0 )
def lowercase ( self ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def lowercase ( self , snake_case__ = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
lowerCAmelCase : List[str] = self.head # default first node
if index == 0:
lowerCAmelCase : Tuple = self.head.next
else:
lowerCAmelCase : Dict = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Tuple = temp.next
lowerCAmelCase : Dict = temp.next
lowerCAmelCase : Tuple = temp.next.next
return delete_node.data
def lowercase ( self ):
return self.head is None
def lowercase ( self ):
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Any = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[str] = current.next
# Make the current node's next point backwards
lowerCAmelCase : int = prev
# Make the previous node be the current node
lowerCAmelCase : int = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : Optional[Any] = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : List[Any] = prev
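# Illustrative reversal (sketch):
#   ll = LinkedList()
#   for v in (1, 2, 3):
#       ll.insert_tail(v)
#   ll.reverse()
#   str(ll)  # '3->2->1'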
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(_A ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_A ) == i
linked_list.insert_nth(_A , i + 1 )
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_A ) == "->".join(str(_A ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_A ) == 9
assert str(_A ) == "->".join(str(_A ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_A ) == "->".join(str(_A ) for i in range(-8 , 1 ) )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Optional[int] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_92.5_55_55,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
lowerCAmelCase : Dict = LinkedList()
for i in test_input:
linked_list.insert_tail(_A )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_A ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : Optional[Any] = linked_list.delete_head()
assert result == -9
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : List[str] = linked_list.delete_tail()
assert result == 12.2
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(10 )
assert result is None
assert (
str(_A ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_A )
assert (
str(_A )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_A )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_A )
print('\nReading/changing Node data using indexing:' )
print(F"Element at Position 1: {linked_list[1]}" )
lowerCAmelCase : Tuple = input('Enter New Value: ' ).strip()
print('New list:' )
print(_A )
print(F"length of linked_list is : {len(_A )}" )
if __name__ == "__main__":
main()
| 646
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Dict = KandinskyVaaControlnetImgaImgPipeline
_lowerCamelCase : int = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
_lowerCamelCase : Union[str, Any] = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
_lowerCamelCase : Optional[int] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowerCamelCase : Any = False
@property
def lowercase ( self ):
return 32
@property
def lowercase ( self ):
return 32
@property
def lowercase ( self ):
return self.time_input_dim
@property
def lowercase ( self ):
return self.time_input_dim * 4
@property
def lowercase ( self ):
return 100
@property
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = {
'in_channels': 8,
# Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase : int = UNetaDConditionModel(**snake_case__ )
return model
@property
def lowercase ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((256, 256) )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_controlnet_img2img( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        init_image = init_image.resize((512, 512) )
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png' )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        prompt = 'A robot, 4k photo'
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , image=init_image , strength=0.85 , generator=generator , negative_prompt='' , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 706
|
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum( number: int ) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )


def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(1000 , 1_000_000 )
        if number == digits_fifth_powers_sum(number ) )


if __name__ == "__main__":
    print(solution())
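# Editor's note (added, not part of the original solution): the search bound is
# safe because 7 * 9**5 = 413343 has only six digits, so no number with seven or
# more digits can equal the sum of the fifth powers of its own digits; searching
# up to 1_000_000 therefore cannot miss a valid number.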
| 646
| 0
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir( files , tmp_path_factory ):
    """simple docstring"""
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload( tmp_path , dataset_info: DatasetInfo ):
    """simple docstring"""
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , 'dataset_info.json' ) )
def test_dataset_info_to_yaml_dict() -> None:
    """simple docstring"""
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty() -> None:
    """simple docstring"""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload( tmp_path , dataset_infos_dict: DatasetInfosDict ):
    """simple docstring"""
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict takes precedence over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , 'README.md' ) )
| 707
|
'''simple docstring'''
def is_palindrome( head ) -> bool:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
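# Minimal usage sketch (editor's addition; `ListNode` is an assumed node type,
# not defined in this module -- any object with `val` and `next` works):
#
#     class ListNode:
#         def __init__(self, val):
#             self.val = val
#             self.next = None
#
#     head = ListNode(1)
#     head.next = ListNode(2)
#     head.next.next = ListNode(1)
#     assert is_palindrome(head)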
def is_palindrome_stack( head ) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict( head ) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
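# Complexity note (editor's addition): is_palindrome runs in O(n) time and O(1)
# extra space but temporarily rewires the list while reversing its second half;
# is_palindrome_stack keeps the list intact at the cost of an O(n) stack; the
# dict-based variant also uses O(n) space and checks that the positions of equal
# values mirror around the centre of the list.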
| 646
| 0
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , do_resize = True , size = None , size_divisor = 32 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , do_center_crop = True , image_mean = [0.48145466, 0.4578275, 0.40821073] , image_std = [0.26862954, 0.26130258, 0.27577711] , do_pad = True , batch_size=7 , min_resolution=30 , max_resolution=400 , num_channels=3 , ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {'shortest_edge': 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict( self ):
        return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            size = self.size['shortest_edge']
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w , h )
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size )
            if max(newh , neww ) > max_size:
                scale = max_size / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
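    # Shape note (editor's addition): the helper above mirrors the processor's
    # shortest-edge resize -- scale so min(h, w) equals `size`, cap the longer
    # side at int(1333 / 800 * size), then round both sides down to a multiple
    # of `size_divisor`; this is what lets the tests below predict the exact
    # pixel-value shapes they assert on.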
@require_torch
@require_vision
class BridgeTowerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'size_divisor' ) )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy( self ):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch( self ):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 708
|
'''simple docstring'''
import math
def solution( n: int = 100 ) -> int:
    """simple docstring"""
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
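# Worked example (editor's addition): for n = 10 the sum of the squares is 385
# while the square of the sum is 55**2 = 3025, so solution(10) returns 2640.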
if __name__ == "__main__":
print(f"""{solution() = }""")
| 646
| 0
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n , prec=1000 ) -> bool:
    """simple docstring"""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division so d stays an int
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
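# Usage note (editor's addition): this is the Miller-Rabin probabilistic
# primality test; `prec` rounds of random witnesses bound the chance of wrongly
# reporting a composite as prime by roughly 4**(-prec), but a True result is
# never an unconditional proof of primality.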
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
    print('Here\'s the list of primes:')
    print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 709
|
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id( self ):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 2000 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
    def test_full_tokenizer( self ):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [465, 287, 265, 631, 842] )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        # fmt: off
        self.assertListEqual(
            tokens , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
        # fmt: on
    def test_fast_encode_decode( self ):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
    def test_tokenizer_integration( self ):
        sequences = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
        expected_encoding = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='AI-Sweden/gpt-sw3-126m' , sequences=sequences , )
| 646
| 0
|
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    """simple docstring"""
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
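# Performance note (editor's addition): since `img` is a NumPy array here, the
# same transform can be vectorised as `img = 255 - img`, avoiding the Python
# double loop entirely.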
if __name__ == "__main__":
# read original image
    img = imread('image_data/lena.jpg', 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
| 710
|
'''simple docstring'''
def is_even( number: int ) -> bool:
    """simple docstring"""
    return number & 1 == 0
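# Examples (editor's addition):
#   is_even(2)   -> True
#   is_even(7)   -> False
#   is_even(-4)  -> True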
if __name__ == "__main__":
import doctest
doctest.testmod()
| 646
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )

    def enable_attention_slicing( self , slice_size = "auto" ):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=16_000 , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors='pt' , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=480_000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps )}." )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="
                    f" {type(prompt )}." )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device='cpu' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            latents = latents.to(self.device )
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
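# Usage sketch (editor's addition; the checkpoint name and waveform variable are
# illustrative, not taken from this file):
#
#     pipe = SpeechToImagePipeline.from_pretrained("some/speech-to-image-checkpoint")
#     output = pipe(audio_waveform, sampling_rate=16_000)
#     output.images[0].save("speech_to_image.png")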
| 711
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory( args: Namespace ):
    """simple docstring"""
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )


class AddNewModelCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser ):
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=str , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=str , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing , testing_file , path=None , *args ):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , 'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
        with open(directory + '/configuration.json' , 'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f"{directory}/configuration.json" )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , 'w' ):
pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
        def remove_copy_lines(path ):
            with open(path , 'r' ) as f:
                lines = f.readlines()
            with open(path , 'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str , line_to_copy_below: str , lines_to_copy: List[str] ):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh , 'w' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file." )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" )
        os.rmdir(directory )
| 712
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase ( a ):
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=snake_case__ , speech_processor=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , feature_extractor=snake_case__ , )
def lowercase ( self , snake_case__ = "auto" ):
if slice_size == "auto":
lowerCAmelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case__ )
def lowercase ( self ):
self.enable_attention_slicing(snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__=1_6000 , snake_case__ = 512 , snake_case__ = 512 , snake_case__ = 50 , snake_case__ = 7.5 , snake_case__ = None , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , **snake_case__ , ):
lowerCAmelCase : List[str] = self.speech_processor.feature_extractor(
snake_case__ , return_tensors='pt' , sampling_rate=snake_case__ ).input_features.to(self.device )
lowerCAmelCase : Optional[Any] = self.speech_model.generate(snake_case__ , max_length=48_0000 )
lowerCAmelCase : str = self.speech_processor.tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , normalize=snake_case__ )[
0
]
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = 1
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = len(snake_case__ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(snake_case__ )}." )
# get prompt text embeddings
lowerCAmelCase : str = self.tokenizer(
snake_case__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowerCAmelCase : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCAmelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = text_embeddings.shape
lowerCAmelCase : Any = text_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase : List[str]
if negative_prompt is None:
lowerCAmelCase : Any = [''] * batch_size
elif type(snake_case__ ) is not type(snake_case__ ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case__ )} !="
f" {type(snake_case__ )}." )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = [negative_prompt]
elif batch_size != len(snake_case__ ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case__ )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
lowerCAmelCase : Dict = negative_prompt
lowerCAmelCase : Optional[int] = text_input_ids.shape[-1]
lowerCAmelCase : int = self.tokenizer(
snake_case__ , padding='max_length' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='pt' , )
lowerCAmelCase : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase : List[Any] = uncond_embeddings.shape[1]
lowerCAmelCase : List[str] = uncond_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase : str = torch.randn(snake_case__ , generator=snake_case__ , device='cpu' , dtype=snake_case__ ).to(
self.device )
else:
lowerCAmelCase : Tuple = torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCAmelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase : Tuple = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase : Union[str, Any] = {}
if accepts_eta:
lowerCAmelCase : int = eta
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase : Tuple = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCAmelCase : List[str] = self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase , lowerCAmelCase : Dict = noise_pred.chunk(2 )
lowerCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase : int = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
lowerCAmelCase : Dict = self.vae.decode(snake_case__ ).sample
lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase : Dict = self.numpy_to_pil(snake_case__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
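
# A minimal, self-contained sketch of the classifier-free guidance step used
# above, with illustrative tensor shapes (not the pipeline's real ones): the
# UNet runs on a doubled batch, then the unconditional and text-conditioned
# halves are recombined with the guidance weight.
import torch

def apply_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks the unconditional prediction on top of the
    # text-conditioned one along the batch dimension, mirroring
    # `torch.cat([latents] * 2)` in the denoising loop above.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

if __name__ == "__main__":
    fake_pred = torch.randn(2, 4, 8, 8)  # (uncond + cond, channels, height, width)
    guided = apply_guidance(fake_pred, guidance_scale=7.5)
    assert guided.shape == (1, 4, 8, 8)
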
from math import sqrt
def __UpperCamelCase ( _A : int = 1_00_00_00 ) -> int:
"""simple docstring"""
lowerCAmelCase : int = 0
lowerCAmelCase : int = 0
lowerCAmelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_A , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
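
# A brute-force cross-check of the counting above, under the standard
# Project Euler 86 reading: for a cuboid with sides a <= b <= c, the shortest
# surface route between opposite corners has length sqrt((a + b)**2 + c**2),
# which is exactly what `sum_shortest_sides` and `max_cuboid_size` encode.
def count_cuboids_up_to(max_side: int) -> int:
    count = 0
    for c in range(1, max_side + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                if sqrt((a + b) ** 2 + c ** 2).is_integer():
                    count += 1
    return count
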
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : List[Any] = LDMTextToImagePipeline
_lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
_lowerCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
_lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Optional[int] = False
def lowercase ( self ):
torch.manual_seed(0 )
lowerCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCAmelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
lowerCAmelCase : str = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase : str = CLIPTextModel(snake_case__ )
lowerCAmelCase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowercase ( self , snake_case__ , snake_case__=0 ):
if str(snake_case__ ).startswith('mps' ):
lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Optional[Any] = self.get_dummy_components()
lowerCAmelCase : Optional[Any] = LDMTextToImagePipeline(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Tuple = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
lowerCAmelCase : List[Any] = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : int = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Tuple = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[Any] = self.get_inputs(snake_case__ )
lowerCAmelCase : List[Any] = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase : Tuple = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
lowerCAmelCase : int = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self , snake_case__ , snake_case__=torch.floataa , snake_case__=0 ):
lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ )
lowerCAmelCase : Any = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 32, 32) )
lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self ):
lowerCAmelCase : Optional[int] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = self.get_inputs(snake_case__ )
lowerCAmelCase : Optional[int] = pipe(**snake_case__ ).images[0]
lowerCAmelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
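
# A hedged usage sketch for the pipeline exercised above; the checkpoint name
# comes from the slow tests, while the prompt and sampling settings are
# illustrative choices.
import torch
from diffusers import LDMTextToImagePipeline

pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256')
pipe = pipe.to('cuda' if torch.cuda.is_available() else 'cpu')
image = pipe(
    'A painting of a squirrel eating a burger',
    num_inference_steps=50,
    guidance_scale=6.0,
).images[0]
image.save('squirrel.png')
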
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_lowerCAmelCase : int = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_lowerCAmelCase : List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_lowerCAmelCase : List[str] = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def __UpperCamelCase ( _A : Any , _A : Optional[Any] , _A : Union[str, Any] , _A : bool , _A : Optional[Dict[int, int]] = None , _A : bool = False , ) -> Any:
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
lowerCAmelCase : str = new_id
# turn into Numpy arrays
lowerCAmelCase : List[str] = np.array(_A )
lowerCAmelCase : List[str] = np.array(_A )
    if reduce_labels:
        label[label == 0] = 2_55
        lowerCAmelCase : Dict = label - 1
        label[label == 2_54] = 2_55
lowerCAmelCase : Dict = label != ignore_index
lowerCAmelCase : Optional[Any] = np.not_equal(_A , _A )
lowerCAmelCase : List[Any] = pred_label[mask]
lowerCAmelCase : Any = np.array(_A )[mask]
lowerCAmelCase : List[str] = pred_label[pred_label == label]
lowerCAmelCase : Any = np.histogram(_A , bins=_A , range=(0, num_labels - 1) )[0]
lowerCAmelCase : Union[str, Any] = np.histogram(_A , bins=_A , range=(0, num_labels - 1) )[0]
lowerCAmelCase : int = np.histogram(_A , bins=_A , range=(0, num_labels - 1) )[0]
lowerCAmelCase : List[str] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __UpperCamelCase ( _A : Optional[int] , _A : Dict , _A : Dict , _A : bool , _A : Optional[Dict[int, int]] = None , _A : bool = False , ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
lowerCAmelCase : Tuple = np.zeros((num_labels,) , dtype=np.floataa )
lowerCAmelCase : Any = np.zeros((num_labels,) , dtype=np.floataa )
lowerCAmelCase : Tuple = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(_A , _A ):
lowerCAmelCase : Any = intersect_and_union(
_A , _A , _A , _A , _A , _A )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __UpperCamelCase ( _A : Tuple , _A : Dict , _A : Optional[int] , _A : bool , _A : Optional[int] = None , _A : Optional[Dict[int, int]] = None , _A : bool = False , ) -> int:
"""simple docstring"""
lowerCAmelCase : List[str] = total_intersect_and_union(
_A , _A , _A , _A , _A , _A )
# compute metrics
lowerCAmelCase : List[str] = {}
lowerCAmelCase : Any = total_area_intersect.sum() / total_area_label.sum()
lowerCAmelCase : Optional[Any] = total_area_intersect / total_area_union
lowerCAmelCase : Union[str, Any] = total_area_intersect / total_area_label
lowerCAmelCase : Tuple = np.nanmean(_A )
lowerCAmelCase : Tuple = np.nanmean(_A )
lowerCAmelCase : int = all_acc
lowerCAmelCase : Optional[Any] = iou
lowerCAmelCase : str = acc
if nan_to_num is not None:
lowerCAmelCase : Optional[Any] = {metric: np.nan_to_num(_A , nan=_A ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def lowercase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
lowerCAmelCase : Optional[Any] = mean_iou(
results=snake_case__ , gt_seg_maps=snake_case__ , num_labels=snake_case__ , ignore_index=snake_case__ , nan_to_num=snake_case__ , label_map=snake_case__ , reduce_labels=snake_case__ , )
return iou_result
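
# A short usage sketch mirroring the docstring example above; the metric is
# assumed to be loaded through `datasets.load_metric("mean_iou")`.
import numpy as np
import datasets

mean_iou_metric = datasets.load_metric('mean_iou')
predicted = [np.array([[1, 2], [3, 4], [5, 255]])]
ground_truth = [np.array([[0, 3], [5, 4], [6, 255]])]
results = mean_iou_metric.compute(
    predictions=predicted,
    references=ground_truth,
    num_labels=10,
    ignore_index=255,
    reduce_labels=False,
)
print(results['mean_iou'])
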
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class lowerCAmelCase ( a ):
_lowerCamelCase : int = """xmod"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , snake_case__=False , snake_case__=2 , snake_case__=False , snake_case__=True , snake_case__=True , snake_case__=("en_XX",) , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : Optional[Any] = use_cache
lowerCAmelCase : Union[str, Any] = classifier_dropout
lowerCAmelCase : int = pre_norm
lowerCAmelCase : Optional[Any] = adapter_reduction_factor
lowerCAmelCase : Any = adapter_layer_norm
lowerCAmelCase : Dict = adapter_reuse_layer_norm
lowerCAmelCase : Any = ln_before_adapter
lowerCAmelCase : Optional[Any] = list(snake_case__ )
lowerCAmelCase : List[Any] = default_language
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
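
# A minimal usage sketch, assuming the public transformers names for the two
# classes defined above (`XmodConfig` and its ONNX counterpart); the language
# list is an illustrative choice, not a checkpoint default.
from transformers import XmodConfig

config = XmodConfig(languages=('en_XX', 'de_DE'), default_language='en_XX')
print(config.num_hidden_layers, config.languages)
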
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __UpperCamelCase ( _A : int ) -> bool:
"""simple docstring"""
lowerCAmelCase : int = int(number**0.5 )
return number == sq * sq
def __UpperCamelCase ( _A : int , _A : int , _A : int , _A : int , _A : int , _A : int ) -> tuple[int, int]:
"""simple docstring"""
lowerCAmelCase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowerCAmelCase : int = x_den * y_den * z_den
lowerCAmelCase : int = gcd(_A , _A )
top //= hcf
bottom //= hcf
return top, bottom
def __UpperCamelCase ( _A : int = 35 ) -> int:
"""simple docstring"""
lowerCAmelCase : set = set()
lowerCAmelCase : int
lowerCAmelCase : Fraction = Fraction(0 )
lowerCAmelCase : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowerCAmelCase : str = x_num * y_den + x_den * y_num
lowerCAmelCase : str = x_den * y_den
lowerCAmelCase : List[str] = gcd(_A , _A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase : Any = add_three(
_A , _A , _A , _A , _A , _A )
unique_s.add(_A )
# n=2
lowerCAmelCase : str = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowerCAmelCase : Any = x_den * x_den * y_den * y_den
if is_sq(_A ) and is_sq(_A ):
lowerCAmelCase : Tuple = int(sqrt(_A ) )
lowerCAmelCase : List[Any] = int(sqrt(_A ) )
lowerCAmelCase : str = gcd(_A , _A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase : str = add_three(
_A , _A , _A , _A , _A , _A )
unique_s.add(_A )
# n=-1
lowerCAmelCase : List[Any] = x_num * y_num
lowerCAmelCase : Dict = x_den * y_num + x_num * y_den
lowerCAmelCase : Any = gcd(_A , _A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase : int = add_three(
_A , _A , _A , _A , _A , _A )
unique_s.add(_A )
                    # n=-2
lowerCAmelCase : List[Any] = x_num * x_num * y_num * y_num
lowerCAmelCase : List[str] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_A ) and is_sq(_A ):
lowerCAmelCase : List[str] = int(sqrt(_A ) )
lowerCAmelCase : Union[str, Any] = int(sqrt(_A ) )
lowerCAmelCase : str = gcd(_A , _A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase : str = add_three(
_A , _A , _A , _A , _A , _A )
unique_s.add(_A )
for num, den in unique_s:
total += Fraction(_A , _A )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
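
# A small worked check of the helper above (upstream name `add_three`):
# 1/2 + 1/3 + 1/6 has top = 18 + 12 + 6 = 36 over bottom = 36, which the
# gcd reduction collapses to (1, 1).
#
#   add_three(1, 2, 1, 3, 1, 6)  # -> (1, 1)
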
'''simple docstring'''
import argparse
import os
import re
_lowerCAmelCase : Dict = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_lowerCAmelCase : str = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_lowerCAmelCase : Any = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowerCAmelCase : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_lowerCAmelCase : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowerCAmelCase : Optional[Any] = re.compile(r'\[([^\]]+)\]')
def __UpperCamelCase ( _A : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Any = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def __UpperCamelCase ( _A : Dict , _A : Any="" , _A : List[str]=None , _A : Any=None ) -> Tuple:
"""simple docstring"""
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Tuple = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
lowerCAmelCase : Optional[int] = ['\n'.join(lines[:index] )]
else:
lowerCAmelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Tuple = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
lowerCAmelCase : List[Any] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : int = []
else:
blocks.append('\n'.join(_A ) )
lowerCAmelCase : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def __UpperCamelCase ( _A : Dict ) -> List[Any]:
"""simple docstring"""
def _inner(_A : Tuple ):
return key(_A ).lower().replace('_' , '' )
return _inner
def __UpperCamelCase ( _A : Union[str, Any] , _A : Any=None ) -> Optional[Any]:
"""simple docstring"""
    def noop(_A : Any ):
        return _A
if key is None:
lowerCAmelCase : List[str] = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : str = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[str] = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_A )[0].isupper()]
lowerCAmelCase : Tuple = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
def __UpperCamelCase ( _A : Union[str, Any] ) -> int:
"""simple docstring"""
def _replace(_A : List[Any] ):
lowerCAmelCase : List[Any] = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCAmelCase : Dict = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : List[str] = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(_A )] ) + "]"
lowerCAmelCase : Optional[int] = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowerCAmelCase : Optional[Any] = sort_objects(_A , key=lambda x : x[1] )
lowerCAmelCase : Dict = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : int = keys[:-1]
lowerCAmelCase : Tuple = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def __UpperCamelCase ( _A : str , _A : Tuple=True ) -> Optional[Any]:
"""simple docstring"""
with open(_A , 'r' ) as f:
lowerCAmelCase : Optional[int] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[Any] = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : List[str] = main_blocks[block_idx]
lowerCAmelCase : Union[str, Any] = block.split('\n' )
# Get to the start of the imports.
lowerCAmelCase : Optional[Any] = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
lowerCAmelCase : str = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Union[str, Any] = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
        lowerCAmelCase : List[Any] = [x[0] for x in sorted(_A , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : int = 0
lowerCAmelCase : Dict = []
for i in range(len(_A ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : str = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(_A , 'w' ) as f:
f.write('\n'.join(_A ) )
def __UpperCamelCase ( _A : Tuple=True ) -> Any:
"""simple docstring"""
lowerCAmelCase : Tuple = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
lowerCAmelCase : Any = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(F"Would overwrite {len(_A )} files, run `make style`." )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_lowerCAmelCase : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
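
# A tiny illustration of the one-line sorting path above (upstream name
# `sort_objects_in_import`): constants sort first, then classes, then
# functions, each alphabetically with underscores ignored.
#
#   sort_objects_in_import('_import_structure["models"] = ["BertModel", "BERT_CONSTANT", "load_bert"]')
#   # -> '_import_structure["models"] = ["BERT_CONSTANT", "BertModel", "load_bert"]'
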
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __UpperCamelCase ( _A : str , _A : dict ) -> str:
"""simple docstring"""
lowerCAmelCase : List[Any] = BeautifulSoup(requests.get(_A , params=_A ).content , 'html.parser' )
lowerCAmelCase : List[str] = soup.find('div' , attrs={'class': 'gs_ri'} )
lowerCAmelCase : Dict = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=64 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Optional[Any] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Dict = use_input_mask
lowerCAmelCase : Tuple = use_token_type_ids
lowerCAmelCase : int = use_labels
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Optional[Any] = embedding_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : List[str] = type_sequence_label_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Any = num_labels
lowerCAmelCase : str = num_choices
lowerCAmelCase : int = scope
def lowercase ( self ):
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = MobileBertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[int] = model(snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = MobileBertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForNextSentencePrediction(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , next_sentence_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = MobileBertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = self.num_labels
lowerCAmelCase : List[Any] = MobileBertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : int = MobileBertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = self.num_choices
lowerCAmelCase : Any = MobileBertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : List[str] = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Tuple = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : str = True
def lowercase ( self , snake_case__ , snake_case__ , snake_case__=False ):
lowerCAmelCase : int = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
lowerCAmelCase : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
lowerCAmelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def lowercase ( self ):
lowerCAmelCase : List[Any] = MobileBertModelTester(self )
lowerCAmelCase : Dict = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
def __UpperCamelCase ( _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return torch.tensor(
_A , dtype=torch.long , device=_A , )
_lowerCAmelCase : Union[str, Any] = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : List[str] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(snake_case__ )
lowerCAmelCase : List[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowerCAmelCase : Tuple = model(snake_case__ )[0]
lowerCAmelCase : List[Any] = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , snake_case__ )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=snake_case__ , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCAmelCase : List[str] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCAmelCase : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
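
# A hedged inference sketch for the checkpoint exercised in the slow test
# above; the tokenizer step follows the standard transformers pattern and is
# not taken from this test file.
import torch
from transformers import AutoTokenizer, MobileBertModel

tokenizer = AutoTokenizer.from_pretrained('google/mobilebert-uncased')
model = MobileBertModel.from_pretrained('google/mobilebert-uncased')
inputs = tokenizer('Hello, this framework is amazing!', return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 512])
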
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __UpperCamelCase ( _A : str , _A : str , _A : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : Any = AlbertConfig.from_json_file(_A )
print(F"Building PyTorch model from configuration: {config}" )
lowerCAmelCase : Optional[int] = AlbertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_A , _A , _A )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , _A )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_lowerCAmelCase : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
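
# Example invocation (script name and paths are illustrative placeholders):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin
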
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCamelCase ( _A : Dict ) -> int:
"""simple docstring"""
lowerCAmelCase : Tuple = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def __UpperCamelCase ( _A : List[Any] , _A : Dict ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def __UpperCamelCase ( _A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
return token
def __UpperCamelCase ( ) -> int:
"""simple docstring"""
lowerCAmelCase : List[Any] = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __UpperCamelCase ( _A : str , _A : Optional[Any] , _A : Dict , _A : str ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : List[str] = 'imagenet-1k-id2label.json'
lowerCAmelCase : Tuple = 10_00
lowerCAmelCase : str = 'huggingface/label-files'
lowerCAmelCase : List[Any] = num_labels
lowerCAmelCase : Any = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase : List[str] = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
lowerCAmelCase : int = CvtConfig(num_labels=_A , idalabel=_A , labelaid=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
lowerCAmelCase : List[str] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
lowerCAmelCase : Tuple = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowerCAmelCase : Any = [2, 2, 20]
lowerCAmelCase : List[str] = [3, 12, 16]
lowerCAmelCase : List[Any] = [1_92, 7_68, 10_24]
lowerCAmelCase : Union[str, Any] = CvtForImageClassification(_A )
lowerCAmelCase : str = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : List[Any] = torch.load(_A , map_location=torch.device('cpu' ) )
lowerCAmelCase : str = OrderedDict()
lowerCAmelCase : int = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowerCAmelCase : List[str] = list_of_state_dict + cls_token(_A )
lowerCAmelCase : Optional[Any] = list_of_state_dict + embeddings(_A )
for cnt in range(config.depth[idx] ):
lowerCAmelCase : List[Any] = list_of_state_dict + attention(_A , _A )
lowerCAmelCase : List[str] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_A )
for i in range(len(_A ) ):
lowerCAmelCase : Tuple = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_lowerCAmelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
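
# The depth/width selection above keys off characters 4-6 of the model name;
# a self-contained illustration of that parsing (names assumed to follow the
# `--cvt_model` convention):
for name in ('cvt-13', 'cvt-21', 'cvt-w24'):
    print(name, name.rsplit('/', 1)[-1][4:6])
# 'cvt-13' -> '13' (depths [1, 2, 10]); 'cvt-21' -> '21' ([1, 4, 16]);
# anything else (e.g. 'cvt-w24' -> 'w2') falls through to the wide config ([2, 2, 20]).
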
'''simple docstring'''
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __UpperCamelCase ( _A , _A , _A=0 ) -> Optional[int]:
"""simple docstring"""
if name is None:
lowerCAmelCase : Optional[int] = None
else:
lowerCAmelCase : Any = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
lowerCAmelCase : Dict = fmt.format(_A )
# Print and recurse (if needed).
if isinstance(_A , _A ):
if msg is not None:
print(_A )
for k in val.keys():
recursive_print(_A , val[k] , spaces + 2 )
elif isinstance(_A , torch.Tensor ):
print(_A , ':' , val.size() )
else:
print(_A , ':' , _A )
def __UpperCamelCase ( _A , _A , _A , _A , _A ) -> Any:
"""simple docstring"""
lowerCAmelCase : Optional[Any] = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase : Tuple = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase : Tuple = param.view(*_A )
lowerCAmelCase : Any = param.transpose(0 , 2 )
lowerCAmelCase : Union[str, Any] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase : Tuple = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase : Any = param.view(*_A )
lowerCAmelCase : str = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase : Optional[int] = param.view(*_A )
return param
def __UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {}
# old versions did not store training args
lowerCAmelCase : Any = input_state_dict.get('args' , _A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase : Optional[int] = ds_args.padded_vocab_size
lowerCAmelCase : Tuple = ds_args.max_position_embeddings
lowerCAmelCase : Dict = ds_args.hidden_size
lowerCAmelCase : Optional[int] = ds_args.num_layers
lowerCAmelCase : Union[str, Any] = ds_args.num_attention_heads
lowerCAmelCase : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase : str = config.n_head
# The hidden_size per head.
lowerCAmelCase : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase : int = input_state_dict['checkpoint_version']
else:
lowerCAmelCase : Optional[Any] = 0.0
# The model.
lowerCAmelCase : Tuple = input_state_dict['model']
# The language model.
lowerCAmelCase : int = model['language_model']
# The embeddings.
lowerCAmelCase : Dict = lm['embedding']
# The word embeddings.
lowerCAmelCase : Union[str, Any] = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase : List[str] = word_embeddings[: config.vocab_size, :]
lowerCAmelCase : int = word_embeddings
# The position embeddings.
lowerCAmelCase : Optional[Any] = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase : int = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
lowerCAmelCase : int = pos_embeddings
# The transformer.
lowerCAmelCase : Union[str, Any] = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase : Dict = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
lowerCAmelCase : str = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase : Optional[Any] = layer_re.match(_A )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase : int = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase : str = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase : Tuple = m.group(3 )
# The name of the layer.
lowerCAmelCase : Any = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase : Tuple = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase : str = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase : int = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _A , _A )
lowerCAmelCase : Optional[int] = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase : Optional[int] = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase : str = masked_bias
lowerCAmelCase : int = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase : Optional[Any] = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase : Optional[Any] = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Store. No change of shape.
lowerCAmelCase : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase : List[str] = megatron_to_transformers[op_name]
lowerCAmelCase : Optional[Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase : Union[str, Any] = megatron_to_transformers[op_name]
lowerCAmelCase : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase : Tuple = transformer['final_layernorm.weight']
lowerCAmelCase : List[Any] = transformer['final_layernorm.bias']
    # For the LM head, transformers wants the matrix tied to the word embeddings.
lowerCAmelCase : List[str] = word_embeddings
# It should be done!
return output_state_dict
def __UpperCamelCase ( ) -> str:
"""simple docstring"""
lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=_A , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=_A , help='An optional config json file describing the pre-trained model.' , )
lowerCAmelCase : List[str] = parser.parse_args()
# Extract the basename.
lowerCAmelCase : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
lowerCAmelCase : List[Any] = torch.load(_A , map_location='cpu' )
else:
lowerCAmelCase : Any = torch.load(args.path_to_checkpoint , map_location='cpu' )
lowerCAmelCase : str = input_state_dict.get('args' , _A )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase : int = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase : List[str] = 'gelu_new'
else:
lowerCAmelCase : Any = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase : Union[str, Any] = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase : List[Any] = GPTaConfig(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
lowerCAmelCase : Dict = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase : str = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
lowerCAmelCase : List[Any] = convert_megatron_checkpoint(_A , _A , _A )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_A , _A )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCAmelCase : Tuple = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase : Dict = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase : int = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
lowerCAmelCase : List[str] = 'gpt2'
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_A )
lowerCAmelCase : Dict = type(_A ).__name__
lowerCAmelCase : str = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(_A )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_A )
# Store the state_dict to file.
lowerCAmelCase : Tuple = os.path.join(_A , 'pytorch_model.bin' )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_A , _A )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
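# Minimal, self-contained sketch of the checkpoint-version >= 2.0 branch of the
# QKV reordering above, run on a dummy tensor. The helper name and shapes are
# illustrative; "3" is the number of query/key/value splits.
import torch

def reorder_qkv_v2(param, num_splits, num_heads, head_size):
    # Megatron >= 2.0 stores [num_heads * num_splits * head_size, :];
    # transformers-GPT2 expects [num_splits * num_heads * head_size, :].
    shape = param.size()
    param = param.view((num_heads, num_splits, head_size) + shape[1:])
    param = param.transpose(0, 1).contiguous()
    return param.view(shape)

dummy = torch.arange(2 * 3 * 4 * 5, dtype=torch.float32).view(2 * 3 * 4, 5)
print(reorder_qkv_v2(dummy, num_splits=3, num_heads=2, head_size=4).shape)  # torch.Size([24, 5])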
| 718
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class lowerCAmelCase ( a ):
_lowerCamelCase : List[str] = """xlm-roberta"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Optional[Any] = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : List[Any] = layer_norm_eps
lowerCAmelCase : Union[str, Any] = position_embedding_type
lowerCAmelCase : Union[str, Any] = use_cache
lowerCAmelCase : List[str] = classifier_dropout
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
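# Hedged usage sketch: the obfuscated config class above corresponds to the
# upstream transformers name XLMRobertaConfig (no weights are downloaded here):
from transformers import XLMRobertaConfig

cfg = XLMRobertaConfig(vocab_size=3_0522, hidden_size=768, num_hidden_layers=12)
print(cfg.model_type)  # "xlm-roberta"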
| 646
| 0
|
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase :
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0.2 , snake_case__=0.2 ):
lowerCAmelCase : Optional[Any] = bp_numa
lowerCAmelCase : List[str] = bp_numa
lowerCAmelCase : List[str] = bp_numa
lowerCAmelCase : List[Any] = conva_get[:2]
lowerCAmelCase : Union[str, Any] = conva_get[2]
lowerCAmelCase : Dict = size_pa
lowerCAmelCase : Dict = rate_w
lowerCAmelCase : List[Any] = rate_t
lowerCAmelCase : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCAmelCase : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowerCAmelCase : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
lowerCAmelCase : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase ( self , snake_case__ ):
# save model dict with pickle
lowerCAmelCase : Union[str, Any] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(snake_case__ , 'wb' ) as f:
pickle.dump(snake_case__ , snake_case__ )
print(f"Model saved: {save_path}" )
@classmethod
def lowercase ( cls , snake_case__ ):
# read saved model
with open(snake_case__ , 'rb' ) as f:
lowerCAmelCase : Tuple = pickle.load(snake_case__ ) # noqa: S301
lowerCAmelCase : Dict = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
lowerCAmelCase : Any = model_dic.get('size_pooling1' )
lowerCAmelCase : str = model_dic.get('num_bp1' )
lowerCAmelCase : Dict = model_dic.get('num_bp2' )
lowerCAmelCase : List[str] = model_dic.get('num_bp3' )
lowerCAmelCase : str = model_dic.get('rate_weight' )
lowerCAmelCase : List[str] = model_dic.get('rate_thre' )
# create model instance
lowerCAmelCase : int = CNN(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# modify model parameter
lowerCAmelCase : Optional[Any] = model_dic.get('w_conv1' )
lowerCAmelCase : Any = model_dic.get('wkj' )
lowerCAmelCase : int = model_dic.get('vji' )
lowerCAmelCase : str = model_dic.get('thre_conv1' )
lowerCAmelCase : Union[str, Any] = model_dic.get('thre_bp2' )
lowerCAmelCase : Dict = model_dic.get('thre_bp3' )
return conv_ins
def lowercase ( self , snake_case__ ):
return 1 / (1 + np.exp(-1 * x ))
def lowercase ( self , snake_case__ ):
return round(snake_case__ , 3 )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
# convolution process
lowerCAmelCase : List[str] = convs[0]
lowerCAmelCase : str = convs[1]
lowerCAmelCase : Tuple = np.shape(snake_case__ )[0]
# get the data slice of original image data, data_focus
lowerCAmelCase : Any = []
for i_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
for j_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
lowerCAmelCase : int = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(snake_case__ )
        # calculate the feature map of every single kernel, and save it as a list of matrices
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase : List[str] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(snake_case__ ):
lowerCAmelCase : Optional[int] = []
for i_focus in range(len(snake_case__ ) ):
lowerCAmelCase : Optional[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(snake_case__ ) )
lowerCAmelCase : List[Any] = np.asmatrix(snake_case__ ).reshape(
snake_case__ , snake_case__ )
data_featuremap.append(snake_case__ )
        # expanding the data slice to one dimension
lowerCAmelCase : Tuple = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(snake_case__ ) )
lowerCAmelCase : int = np.asarray(snake_case__ )
return focus_list, data_featuremap
def lowercase ( self , snake_case__ , snake_case__ , snake_case__="average_pool" ):
# pooling process
lowerCAmelCase : Optional[Any] = len(featuremaps[0] )
lowerCAmelCase : List[str] = int(size_map / size_pooling )
lowerCAmelCase : int = []
for i_map in range(len(snake_case__ ) ):
lowerCAmelCase : List[Any] = featuremaps[i_map]
lowerCAmelCase : Union[str, Any] = []
for i_focus in range(0 , snake_case__ , snake_case__ ):
for j_focus in range(0 , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(snake_case__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(snake_case__ ) )
lowerCAmelCase : Union[str, Any] = np.asmatrix(snake_case__ ).reshape(snake_case__ , snake_case__ )
featuremap_pooled.append(snake_case__ )
return featuremap_pooled
def lowercase ( self , snake_case__ ):
        # expanding three-dimensional data to a one-dimensional list
lowerCAmelCase : Optional[Any] = []
for i in range(len(snake_case__ ) ):
lowerCAmelCase : Optional[Any] = np.shape(data[i] )
lowerCAmelCase : Any = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCAmelCase : Tuple = data_listed.getA().tolist()[0]
data_expanded.extend(snake_case__ )
lowerCAmelCase : Optional[Any] = np.asarray(snake_case__ )
return data_expanded
def lowercase ( self , snake_case__ ):
# expanding matrix to one dimension list
lowerCAmelCase : List[str] = np.asarray(snake_case__ )
lowerCAmelCase : int = np.shape(snake_case__ )
lowerCAmelCase : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Tuple = []
lowerCAmelCase : Tuple = 0
for i_map in range(snake_case__ ):
lowerCAmelCase : Dict = np.ones((size_map, size_map) )
for i in range(0 , snake_case__ , snake_case__ ):
for j in range(0 , snake_case__ , snake_case__ ):
lowerCAmelCase : List[str] = pd_pool[
i_pool
]
lowerCAmelCase : Union[str, Any] = i_pool + 1
lowerCAmelCase : Tuple = np.multiply(
snake_case__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(snake_case__ )
return pd_all
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=bool ):
        # model training
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(snake_case__ )) )
print((' - - Shape: Teach_Data ', np.shape(snake_case__ )) )
lowerCAmelCase : Dict = 0
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[Any] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowerCAmelCase : Tuple = 0
print(f"-------------Learning Time {rp}--------------" )
for p in range(len(snake_case__ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCAmelCase : List[str] = np.asmatrix(datas_train[p] )
lowerCAmelCase : List[Any] = np.asarray(datas_teach[p] )
lowerCAmelCase : Union[str, Any] = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : Dict = self.pooling(snake_case__ , self.size_poolinga )
lowerCAmelCase : Dict = np.shape(snake_case__ )
lowerCAmelCase : Any = self._expand(snake_case__ )
lowerCAmelCase : Tuple = data_bp_input
lowerCAmelCase : Union[str, Any] = np.dot(snake_case__ , self.vji.T ) - self.thre_bpa
lowerCAmelCase : str = self.sig(snake_case__ )
lowerCAmelCase : Union[str, Any] = np.dot(snake_case__ , self.wkj.T ) - self.thre_bpa
lowerCAmelCase : Union[str, Any] = self.sig(snake_case__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCAmelCase : List[Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(snake_case__ , (1 - bp_outa) ) )
lowerCAmelCase : str = np.multiply(
np.dot(snake_case__ , self.wkj ) , np.multiply(snake_case__ , (1 - bp_outa) ) )
lowerCAmelCase : List[str] = np.dot(snake_case__ , self.vji )
lowerCAmelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCAmelCase : Tuple = pd_conva_pooled.T.getA().tolist()
lowerCAmelCase : List[Any] = self._calculate_gradient_from_pool(
snake_case__ , snake_case__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCAmelCase : Tuple = self._expand_mat(pd_conva_all[k_conv] )
lowerCAmelCase : int = self.rate_weight * np.dot(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCAmelCase : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCAmelCase : Any = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCAmelCase : str = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCAmelCase : str = self.thre_bpa - pd_k_all * self.rate_thre
lowerCAmelCase : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCAmelCase : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCAmelCase : int = rp + 1
lowerCAmelCase : int = error_count / patterns
all_mse.append(snake_case__ )
def draw_error():
lowerCAmelCase : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(snake_case__ , '+-' )
plt.plot(snake_case__ , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(snake_case__ , alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def lowercase ( self , snake_case__ ):
# model predict
lowerCAmelCase : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(snake_case__ )) )
for p in range(len(snake_case__ ) ):
lowerCAmelCase : Dict = np.asmatrix(datas_test[p] )
lowerCAmelCase : str = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : Any = self.pooling(snake_case__ , self.size_poolinga )
lowerCAmelCase : Union[str, Any] = self._expand(snake_case__ )
lowerCAmelCase : Optional[Any] = data_bp_input
lowerCAmelCase : Tuple = bp_outa * self.vji.T - self.thre_bpa
lowerCAmelCase : Any = self.sig(snake_case__ )
lowerCAmelCase : Any = bp_outa * self.wkj.T - self.thre_bpa
lowerCAmelCase : List[str] = self.sig(snake_case__ )
produce_out.extend(bp_outa.getA().tolist() )
lowerCAmelCase : Optional[int] = [list(map(self.do_round , snake_case__ ) ) for each in produce_out]
return np.asarray(snake_case__ )
def lowercase ( self , snake_case__ ):
        # return the image data after the convolution process so we can inspect it
lowerCAmelCase : Optional[Any] = np.asmatrix(snake_case__ )
lowerCAmelCase : List[Any] = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : int = self.pooling(snake_case__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
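# Minimal, self-contained sketch of the average-pooling step used by the class
# above, independent of the obfuscated names (shapes and values are illustrative):
import numpy as np

def average_pool(feature_map: np.ndarray, size: int) -> np.ndarray:
    # Split the map into size x size blocks and average each block.
    h, w = feature_map.shape
    return feature_map.reshape(h // size, size, w // size, size).mean(axis=(1, 3))

fm = np.arange(16, dtype=float).reshape(4, 4)
print(average_pool(fm, 2))  # [[ 2.5  4.5] [10.5 12.5]]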
| 719
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
        '--dataset_name' , type=_A , default='wikitext' , help='Name of the training dataset. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=_A , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=_A , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=_A , default=10_00 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=_A , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=_A , type=_A , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=_A , default=5_12 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=_A , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
lowerCAmelCase : Any = parser.parse_args()
return args
def __UpperCamelCase ( _A : Optional[int] ) -> int:
"""simple docstring"""
def fn(_A : Tuple ):
return tokenizer(examples['text'] )
return fn
def __UpperCamelCase ( _A : int ) -> int:
"""simple docstring"""
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_data['input_ids'] ) ):
lowerCAmelCase : Optional[Any] = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
lowerCAmelCase : Any = tf.train.Features(feature=_A )
lowerCAmelCase : List[str] = tf.train.Example(features=_A )
lowerCAmelCase : Tuple = example.SerializeToString()
records.append(_A )
return records
def __UpperCamelCase ( _A : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCAmelCase : Optional[Any] = min(len(_A ) , args.limit )
lowerCAmelCase : Dict = dataset.select(range(_A ) )
print(F"Limiting the dataset to {args.limit} entries." )
lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(_A ):
os.makedirs(_A )
else:
lowerCAmelCase : List[Any] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCAmelCase : Any = tokenize_function(_A )
lowerCAmelCase : Optional[int] = dataset.map(_A , batched=_A , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_A : str ):
# Concatenate all texts.
lowerCAmelCase : Optional[int] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCAmelCase : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCAmelCase : List[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowerCAmelCase : str = {
k: [t[i : i + args.max_length] for i in range(0 , _A , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowerCAmelCase : List[Any] = dataset_tokenized.map(_A , batched=_A , batch_size=10_00 , num_proc=4 )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = 0
for shard in range(0 , len(_A ) , args.shard_size ):
lowerCAmelCase : Optional[Any] = grouped_dataset[shard : shard + args.shard_size]
lowerCAmelCase : List[str] = len(dataset_snapshot['input_ids'] )
lowerCAmelCase : Union[str, Any] = os.path.join(_A , F"dataset-{shard_count}-{records_containing}.tfrecord" )
lowerCAmelCase : List[Any] = get_serialized_examples(_A )
with tf.io.TFRecordWriter(_A ) as out_file:
for i in range(len(_A ) ):
lowerCAmelCase : Union[str, Any] = serialized_examples[i]
out_file.write(_A )
print('Wrote file {} containing {} records'.format(_A , _A ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , 'w' ) as f:
print(F"Total {args.split} records: {total_records}" , file=_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = parse_args()
main(args)
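# Hedged round-trip sketch: parsing one of the shards written above. The shard
# path is illustrative; the feature names match the writer in get_serialized_examples:
import tensorflow as tf

feature_spec = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "attention_mask": tf.io.VarLenFeature(tf.int64),
}

def parse(record):
    return tf.io.parse_single_example(record, feature_spec)

# ds = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord").map(parse)
# for example in ds.take(1):
#     print(tf.sparse.to_dense(example["input_ids"]).shape)  # (max_length,)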
| 646
| 0
|
'''simple docstring'''
def __UpperCamelCase ( _A : list[int] ) -> list[list[int]]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = []
if len(_A ) == 1:
return [nums.copy()]
for _ in range(len(_A ) ):
lowerCAmelCase : Optional[Any] = nums.pop(0 )
lowerCAmelCase : Optional[Any] = permute(_A )
for perm in permutations:
perm.append(_A )
result.extend(_A )
nums.append(_A )
return result
def __UpperCamelCase ( _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
def backtrack(_A : List[Any] ):
if start == len(_A ) - 1:
output.append(nums[:] )
else:
for i in range(_A , len(_A ) ):
lowerCAmelCase : Optional[Any] = nums[i], nums[start]
backtrack(start + 1 )
lowerCAmelCase : Optional[Any] = nums[i], nums[start] # backtrack
lowerCAmelCase : Optional[Any] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
    # use res to print the output of the permute2 function
_lowerCAmelCase : Tuple = permutea([1, 2, 3])
print(res)
doctest.testmod()
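# Sanity check (using the original names permute/permute2 that the two
# obfuscated functions above correspond to; result order differs, content doesn't):
#
#   >>> sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
#   True  # both enumerate all 3! = 6 orderings of [1, 2, 3]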
| 720
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase ( _A : Any , _A : Dict , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
hf_model.apply_weight_norm()
lowerCAmelCase : int = checkpoint['input_conv.weight_g']
lowerCAmelCase : Optional[int] = checkpoint['input_conv.weight_v']
lowerCAmelCase : Dict = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase : Optional[Any] = checkpoint[F"upsamples.{i}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.weight_v"]
lowerCAmelCase : str = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
lowerCAmelCase : str = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
lowerCAmelCase : int = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
lowerCAmelCase : Optional[Any] = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
lowerCAmelCase : Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
lowerCAmelCase : List[Any] = checkpoint['output_conv.1.weight_g']
lowerCAmelCase : List[str] = checkpoint['output_conv.1.weight_v']
lowerCAmelCase : Optional[Any] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCamelCase ( _A : Dict , _A : Union[str, Any] , _A : List[Any] , _A : Any=None , _A : Any=None , ) -> Dict:
"""simple docstring"""
if config_path is not None:
lowerCAmelCase : Dict = SpeechTaHifiGanConfig.from_pretrained(_A )
else:
lowerCAmelCase : Union[str, Any] = SpeechTaHifiGanConfig()
lowerCAmelCase : List[Any] = SpeechTaHifiGan(_A )
lowerCAmelCase : List[str] = torch.load(_A )
load_weights(orig_checkpoint['model']['generator'] , _A , _A )
lowerCAmelCase : Tuple = np.load(_A )
lowerCAmelCase : List[Any] = stats[0].reshape(-1 )
lowerCAmelCase : int = stats[1].reshape(-1 )
lowerCAmelCase : Union[str, Any] = torch.from_numpy(_A ).float()
lowerCAmelCase : int = torch.from_numpy(_A ).float()
model.save_pretrained(_A )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
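# Hedged invocation sketch: the script filename and paths below are
# illustrative; stats.npy is expected to hold the [mean, scale] arrays loaded above.
#
#   python convert_hifigan.py \
#       --checkpoint_path ./hifigan/generator.pt \
#       --stats_path ./hifigan/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub my-user/speecht5-hifigan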
| 646
| 0
|
'''simple docstring'''
_lowerCAmelCase : List[str] = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
_lowerCAmelCase : List[Any] = {value: key for key, value in MORSE_CODE_DICT.items()}
def __UpperCamelCase ( _A : str ) -> str:
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def __UpperCamelCase ( _A : str ) -> str:
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def __UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = 'Morse code here!'
print(_A )
lowerCAmelCase : List[Any] = encrypt(_A )
print(_A )
lowerCAmelCase : Optional[int] = decrypt(_A )
print(_A )
if __name__ == "__main__":
main()
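# Round-trip sanity check (using the original names encrypt/decrypt that the
# two obfuscated functions above correspond to):
#
#   >>> encrypt("SOS")
#   '... --- ...'
#   >>> decrypt("... --- ...")
#   'SOS'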
| 721
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_lowerCAmelCase : Optional[Any] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_lowerCAmelCase : List[Any] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def lowercase ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ = CHRF.CHAR_ORDER , snake_case__ = CHRF.WORD_ORDER , snake_case__ = CHRF.BETA , snake_case__ = False , snake_case__ = False , snake_case__ = False , ):
lowerCAmelCase : List[str] = len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
lowerCAmelCase : List[str] = [[refs[i] for refs in references] for i in range(snake_case__ )]
lowerCAmelCase : Union[str, Any] = CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Dict = sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
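# Hedged usage sketch, mirroring Example 1 from the docstring above; an exact
# match should score 100:
#
#   >>> import datasets
#   >>> chrf = datasets.load_metric("chrf")
#   >>> chrf.compute(predictions=["hello there"], references=[["hello there"]])["score"]
#   100.0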
| 646
| 0
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=UpperCAmelCase__ , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=UpperCAmelCase__ , default=5 )
parser.add_argument("--batch_size" , type=UpperCAmelCase__ , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=UpperCAmelCase__ , default=1 )
parser.add_argument("--freeze" , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
parser.add_argument("--learning_rate" , type=UpperCAmelCase__ , default=5e-4 )
parser.add_argument("--seed" , type=UpperCAmelCase__ , default=0 )
parser.add_argument("--lr_scheduler_type" , type=UpperCAmelCase__ , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=UpperCAmelCase__ , default=1_0 )
parser.add_argument("--weight_decay" , type=UpperCAmelCase__ , default=0.01 )
parser.add_argument("--output_dir" , type=UpperCAmelCase__ , default="./results" )
return parser.parse_args()
_lowerCamelCase : str = load('''accuracy''')
def __lowerCamelCase (UpperCAmelCase__ : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = eval_pred
SCREAMING_SNAKE_CASE = np.argmax(UpperCAmelCase__ , axis=1 )
return metric.compute(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )
class lowercase ( a ):
def __init__( self : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> None:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = trainer
def __snake_case( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Dict , **_UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
if control.should_evaluate:
SCREAMING_SNAKE_CASE = deepcopy(_UpperCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = get_args()
set_seed(args.seed )
SCREAMING_SNAKE_CASE = load_dataset("codeparrot/codecomplex" , split="train" )
SCREAMING_SNAKE_CASE = dataset.train_test_split(test_size=0.2 )
SCREAMING_SNAKE_CASE = train_test["test"].train_test_split(test_size=0.5 )
SCREAMING_SNAKE_CASE = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE = tokenizer.eos_token
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
SCREAMING_SNAKE_CASE = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(UpperCAmelCase__ : List[Any] ):
SCREAMING_SNAKE_CASE = tokenizer(example["src"] , truncation=UpperCAmelCase__ , max_length=1_0_2_4 )
SCREAMING_SNAKE_CASE = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
SCREAMING_SNAKE_CASE = train_test_validation.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=train_test_validation["train"].column_names , )
SCREAMING_SNAKE_CASE = DataCollatorWithPadding(tokenizer=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
SCREAMING_SNAKE_CASE = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , )
print("Training..." )
trainer.add_callback(CustomCallback(UpperCAmelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
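# Hedged invocation sketch: the script filename is illustrative, the flags
# match the argparse above, and W&B reporting is hard-coded in TrainingArguments:
#
#   python train_complexity_predictor.py \
#       --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 \
#       --batch_size 6 \
#       --freeze True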
| 647
|
import numpy as np
def __lowerCamelCase (UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float = 1e-12 , UpperCAmelCase__ : int = 1_0_0 , ):
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[1]
# Ensure proper dimensionality.
assert np.shape(UpperCAmelCase__ )[0] == np.shape(UpperCAmelCase__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(UpperCAmelCase__ ) == np.iscomplexobj(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = np.iscomplexobj(UpperCAmelCase__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(UpperCAmelCase__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1e12
while not convergence:
# Multiple matrix by the vector.
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , UpperCAmelCase__ )
# Normalize the resulting output vector.
SCREAMING_SNAKE_CASE = w / np.linalg.norm(UpperCAmelCase__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
SCREAMING_SNAKE_CASE = vector.conj().T if is_complex else vector.T
SCREAMING_SNAKE_CASE = np.dot(UpperCAmelCase__ , np.dot(UpperCAmelCase__ , UpperCAmelCase__ ) )
# Check convergence.
SCREAMING_SNAKE_CASE = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = lambda_
if is_complex:
SCREAMING_SNAKE_CASE = np.real(lambda_ )
return lambda_, vector
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] )
SCREAMING_SNAKE_CASE = real_input_matrix.astype(np.complexaaa )
SCREAMING_SNAKE_CASE = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
SCREAMING_SNAKE_CASE = np.array([4_1, 4, 2_0] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
SCREAMING_SNAKE_CASE = real_input_matrix
SCREAMING_SNAKE_CASE = real_vector
elif problem_type == "complex":
SCREAMING_SNAKE_CASE = complex_input_matrix
SCREAMING_SNAKE_CASE = complex_vector
# Our implementation.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = power_iteration(UpperCAmelCase__ , UpperCAmelCase__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = np.linalg.eigh(UpperCAmelCase__ )
# Last eigenvalue is the maximum one.
SCREAMING_SNAKE_CASE = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
SCREAMING_SNAKE_CASE = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(UpperCAmelCase__ ) - np.abs(UpperCAmelCase__ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
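# Quick standalone check of the power-iteration idea above on a 2x2 symmetric
# matrix whose dominant eigenvalue is known to be 3 (values are illustrative):
import numpy as np

a = np.array([[2.0, 1.0], [1.0, 2.0]])
v = np.array([1.0, 0.0])
for _ in range(50):
    v = a @ v
    v /= np.linalg.norm(v)
print(round(float(v @ a @ v), 6))  # 3.0 (Rayleigh quotient of the converged vector)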
| 647
| 1
|
def __lowerCamelCase (UpperCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ):
SCREAMING_SNAKE_CASE = set()
# Replace all the whitespace in our sentence
SCREAMING_SNAKE_CASE = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(UpperCAmelCase__ ) == 2_6
def __lowerCamelCase (UpperCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ):
SCREAMING_SNAKE_CASE = [False] * 2_6
for char in input_str:
if char.islower():
SCREAMING_SNAKE_CASE = True
elif char.isupper():
SCREAMING_SNAKE_CASE = True
return all(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : str = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def __lowerCamelCase ():
from timeit import timeit
SCREAMING_SNAKE_CASE = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=UpperCAmelCase__ ) )
print(timeit("is_pangram_faster()" , setup=UpperCAmelCase__ ) )
print(timeit("is_pangram_fastest()" , setup=UpperCAmelCase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
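# Quick check of the three variants above (original names is_pangram,
# is_pangram_faster, is_pangram_fastest; all three agree):
#
#   >>> is_pangram("The quick brown fox jumps over the lazy dog")
#   True
#   >>> is_pangram_fastest("Hello world")
#   False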
| 647
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase ( a ):
lowercase__ : Optional[Any] = ["""input_features""", """is_longer"""]
def __init__( self : str , _UpperCamelCase : Optional[int]=64 , _UpperCamelCase : Any=48_000 , _UpperCamelCase : Optional[Any]=480 , _UpperCamelCase : List[Any]=10 , _UpperCamelCase : Any=1_024 , _UpperCamelCase : List[Any]=0.0 , _UpperCamelCase : Any=False , _UpperCamelCase : float = 0 , _UpperCamelCase : float = 14_000 , _UpperCamelCase : int = None , _UpperCamelCase : str = "fusion" , _UpperCamelCase : str = "repeatpad" , **_UpperCamelCase : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=_UpperCamelCase , sampling_rate=_UpperCamelCase , padding_value=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = top_db
SCREAMING_SNAKE_CASE = truncation
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = fft_window_size
SCREAMING_SNAKE_CASE = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE = hop_length
SCREAMING_SNAKE_CASE = max_length_s
SCREAMING_SNAKE_CASE = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = frequency_min
SCREAMING_SNAKE_CASE = frequency_max
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm=_UpperCamelCase , mel_scale="htk" , )
SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_UpperCamelCase , min_frequency=_UpperCamelCase , max_frequency=_UpperCamelCase , sampling_rate=_UpperCamelCase , norm="slaney" , mel_scale="slaney" , )
def __snake_case( self : str ) -> Dict[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __snake_case( self : Optional[Any] , _UpperCamelCase : np.array , _UpperCamelCase : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = spectrogram(
_UpperCamelCase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_UpperCamelCase , log_mel="dB" , )
return log_mel_spectrogram.T
def __snake_case( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
_UpperCamelCase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=_UpperCamelCase )
SCREAMING_SNAKE_CASE = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __snake_case( self : Optional[int] , _UpperCamelCase : np.array , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) - max_length
SCREAMING_SNAKE_CASE = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
                SCREAMING_SNAKE_CASE = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
SCREAMING_SNAKE_CASE = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE = False
else:
SCREAMING_SNAKE_CASE = self._random_mel_fusion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented" )
else:
SCREAMING_SNAKE_CASE = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE = int(max_length / len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.stack(np.tile(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = np.pad(_UpperCamelCase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters )
SCREAMING_SNAKE_CASE = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE = self._np_extract_fbank_features(_UpperCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Dict , _UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCamelCase : str = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , **_UpperCamelCase : Tuple , ) -> BatchFeature:
'''simple docstring'''
SCREAMING_SNAKE_CASE = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE = isinstance(_UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE = np.asarray(_UpperCamelCase , dtype=np.floataa )
elif isinstance(_UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE = [
self._get_input_mel(_UpperCamelCase , max_length if max_length else self.nb_max_samples , _UpperCamelCase , _UpperCamelCase )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for mel, longer in padded_inputs:
input_mel.append(_UpperCamelCase )
is_longer.append(_UpperCamelCase )
if truncation == "fusion" and sum(_UpperCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE = np.random.randint(0 , len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = True
if isinstance(input_mel[0] , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE = {"input_features": input_mel, "is_longer": is_longer}
SCREAMING_SNAKE_CASE = BatchFeature(_UpperCamelCase )
if return_tensors is not None:
SCREAMING_SNAKE_CASE = input_features.convert_to_tensors(_UpperCamelCase )
return input_features
| 647
| 1
|
from __future__ import annotations
class lowercase :
def __init__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = text, pattern
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = len(_UpperCamelCase ), len(_UpperCamelCase )
def __snake_case( self : Optional[int] , _UpperCamelCase : str ) -> int:
'''simple docstring'''
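        # bad-character heuristic: return the rightmost index of `char` in the
        # pattern, or -1 if the character never occurs in the pattern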
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __snake_case( self : Union[str, Any] , _UpperCamelCase : int ) -> int:
'''simple docstring'''
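        # align the pattern at `current_pos` and scan right to left; return the text
        # index of the rightmost mismatch, or -1 if the whole window matches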
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __snake_case( self : Optional[int] ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for i in range(self.textLen - self.patLen + 1 ):
SCREAMING_SNAKE_CASE = self.mismatch_in_text(_UpperCamelCase )
if mismatch_index == -1:
positions.append(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = self.match_in_pattern(self.text[mismatch_index] )
SCREAMING_SNAKE_CASE = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_lowerCamelCase : Optional[int] = '''ABAABA'''
_lowerCamelCase : Tuple = '''AB'''
_lowerCamelCase : str = BoyerMooreSearch(text, pattern)
_lowerCamelCase : Union[str, Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 647
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''
_lowerCamelCase : List[Any] = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = BertAbsConfig(
temp_dir="." , finetune_bert=UpperCAmelCase__ , large=UpperCAmelCase__ , share_emb=UpperCAmelCase__ , use_bert_emb=UpperCAmelCase__ , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , lambda UpperCAmelCase__ , UpperCAmelCase__ : storage )
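    # the identity `lambda storage, loc: storage` map_location loads every tensor
    # onto CPU, regardless of the device the checkpoint was saved from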
SCREAMING_SNAKE_CASE = AbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) , UpperCAmelCase__ )
original.eval()
SCREAMING_SNAKE_CASE = BertAbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
SCREAMING_SNAKE_CASE = encoder_input_ids
SCREAMING_SNAKE_CASE = decoder_input_ids
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE = original(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = original.generator(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = new_model(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = new_model.generator(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 647
| 1
|
from scipy.stats import pearsonr
import datasets
_lowerCamelCase : Any = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_lowerCamelCase : Dict = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_lowerCamelCase : Union[str, Any] = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def __snake_case( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any]=False ) -> Union[str, Any]:
'''simple docstring'''
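        # scipy.stats.pearsonr returns a (correlation coefficient, p-value) pair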
if return_pvalue:
SCREAMING_SNAKE_CASE = pearsonr(_UpperCamelCase , _UpperCamelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(_UpperCamelCase , _UpperCamelCase )[0] )}
| 647
|
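# Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1; the recursive
# step below computes this as (a(n-1) - 1) * a(n-1) + 1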
def __lowerCamelCase (UpperCAmelCase__ : int ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), F"The input value of [n={number}] is not an integer"
if number == 1:
return 2
elif number < 1:
SCREAMING_SNAKE_CASE = F"The input value of [n={number}] has to be > 0"
raise ValueError(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE = sylvester(number - 1 )
SCREAMING_SNAKE_CASE = num - 1
SCREAMING_SNAKE_CASE = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 647
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class lowercase ( a ):
lowercase__ : Optional[Any] = """mra"""
def __init__( self : Optional[Any] , _UpperCamelCase : Union[str, Any]=50_265 , _UpperCamelCase : Dict=768 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : str=3_072 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : str=0.1 , _UpperCamelCase : Tuple=512 , _UpperCamelCase : Tuple=1 , _UpperCamelCase : Optional[Any]=0.0_2 , _UpperCamelCase : Tuple=1e-5 , _UpperCamelCase : List[Any]="absolute" , _UpperCamelCase : Any=4 , _UpperCamelCase : str="full" , _UpperCamelCase : List[str]=0 , _UpperCamelCase : Any=0 , _UpperCamelCase : List[Any]=1 , _UpperCamelCase : Any=0 , _UpperCamelCase : Optional[Any]=2 , **_UpperCamelCase : str , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = block_per_row
SCREAMING_SNAKE_CASE = approx_mode
SCREAMING_SNAKE_CASE = initial_prior_first_n_blocks
SCREAMING_SNAKE_CASE = initial_prior_diagonal_n_blocks
| 647
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase ( unittest.TestCase ):
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices." )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
@require_multi_gpu
def __snake_case( self : int ) -> int:
'''simple docstring'''
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
SCREAMING_SNAKE_CASE = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : str = Accelerator()
_lowerCamelCase : List[str] = (accelerator.state.process_index + 2, 10)
_lowerCamelCase : str = torch.randint(0, 10, shape).to(accelerator.device)
_lowerCamelCase : Optional[Any] = ''''''
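    # each rank builds a tensor whose dim-0 size depends on its process index;
    # pad_across_processes should zero-pad dim 0 up to the largest size across ranks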
_lowerCamelCase : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCamelCase : Any = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCamelCase : int = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 647
| 1
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowercase :
def __init__( self : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Dict=13 , _UpperCamelCase : Any=32 , _UpperCamelCase : str=2 , _UpperCamelCase : Any=3 , _UpperCamelCase : Optional[Any]=16 , _UpperCamelCase : int=[1, 2, 1] , _UpperCamelCase : Union[str, Any]=[2, 2, 4] , _UpperCamelCase : Optional[int]=2 , _UpperCamelCase : int=2.0 , _UpperCamelCase : List[str]=True , _UpperCamelCase : str=0.0 , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : List[Any]="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : int=True , _UpperCamelCase : int=0.0_2 , _UpperCamelCase : List[Any]=1e-5 , _UpperCamelCase : int=True , _UpperCamelCase : Tuple=None , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[str]=10 , _UpperCamelCase : Dict=8 , _UpperCamelCase : List[Any]=["stage1", "stage2", "stage3"] , _UpperCamelCase : List[Any]=[1, 2, 3] , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = window_size
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = use_absolute_embeddings
SCREAMING_SNAKE_CASE = patch_norm
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = encoder_stride
SCREAMING_SNAKE_CASE = out_features
SCREAMING_SNAKE_CASE = out_indices
def __snake_case( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MaskFormerSwinModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
SCREAMING_SNAKE_CASE = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __snake_case( self : Tuple , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MaskFormerSwinBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = ["stem"]
SCREAMING_SNAKE_CASE = MaskFormerSwinBackbone(config=_UpperCamelCase )
def __snake_case( self : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : List[Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : Optional[Any] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : List[Any] = False
lowercase__ : Optional[Any] = False
lowercase__ : List[str] = False
lowercase__ : Optional[int] = False
def __snake_case( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MaskFormerSwinModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
pass
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case( self : List[Any] ) -> Dict:
'''simple docstring'''
return
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCamelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
def __snake_case( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def __snake_case( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def __snake_case( self : List[str] ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
pass
def __snake_case( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __snake_case( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def __snake_case( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __snake_case( self : Any ) -> Tuple:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_UpperCamelCase : List[str] ):
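            # zero out NaN entries in place so the allclose comparison below is
            # well-defined (presumably `t[t != t] = 0` before name obfuscation)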
SCREAMING_SNAKE_CASE = 0
return t
def check_equivalence(_UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int]={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase , return_dict=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase , return_dict=_UpperCamelCase , **_UpperCamelCase ).to_tuple()
def recursive_check(_UpperCamelCase : Optional[int] , _UpperCamelCase : str ):
if isinstance(_UpperCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCamelCase , _UpperCamelCase ):
recursive_check(_UpperCamelCase , _UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_UpperCamelCase , _UpperCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCamelCase ) , set_nan_tensor_to_zero(_UpperCamelCase ) , atol=1e-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(_UpperCamelCase ).any()} and `inf`: {torch.isinf(_UpperCamelCase )}. Dict has"
F" `nan`: {torch.isnan(_UpperCamelCase ).any()} and `inf`: {torch.isinf(_UpperCamelCase )}."
) , )
recursive_check(_UpperCamelCase , _UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
check_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
check_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
check_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , {"output_hidden_states": True} )
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
check_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , {"output_hidden_states": True} )
@require_torch
class lowercase ( unittest.TestCase , a ):
lowercase__ : Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__ : List[str] = MaskFormerSwinConfig
def __snake_case( self : Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MaskFormerSwinModelTester(self )
def __snake_case( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = backbone_class(_UpperCamelCase )
backbone.to(_UpperCamelCase )
backbone.eval()
SCREAMING_SNAKE_CASE = backbone(**_UpperCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _UpperCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
SCREAMING_SNAKE_CASE = backbone(**_UpperCamelCase , output_hidden_states=_UpperCamelCase )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
SCREAMING_SNAKE_CASE = backbone(**_UpperCamelCase , output_attentions=_UpperCamelCase )
self.assertIsNotNone(outputs.attentions )
| 647
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase ( a ):
lowercase__ : Tuple = (KDPMaDiscreteScheduler,)
lowercase__ : Optional[int] = 10
def __snake_case( self : Optional[Any] , **_UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_100,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**_UpperCamelCase )
return config
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase )
def __snake_case( self : Union[str, Any] ) -> str:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
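        # standard denoising loop: scale the model input for the current timestep,
        # predict the residual, then step the scheduler to the previous sample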
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1_112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
if str(_UpperCamelCase ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
| 647
| 1
|
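# Count the ways to make change for `x` pence with UK coins (1, 2, 5, 10, 20, 50,
# 100, 200) via mutual recursion; the default of 200 pence matches Project Euler
# problem 31.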
def __lowerCamelCase ():
return 1
def __lowerCamelCase (UpperCAmelCase__ : int ):
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def __lowerCamelCase (UpperCAmelCase__ : int ):
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int ):
return 0 if x < 0 else ten_pence(x - 1_0 ) + five_pence(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int ):
return 0 if x < 0 else twenty_pence(x - 2_0 ) + ten_pence(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int ):
return 0 if x < 0 else fifty_pence(x - 5_0 ) + twenty_pence(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int ):
return 0 if x < 0 else one_pound(x - 1_0_0 ) + fifty_pence(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int ):
return 0 if x < 0 else two_pound(x - 2_0_0 ) + one_pound(UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int = 2_0_0 ):
return two_pound(UpperCAmelCase__ )
if __name__ == "__main__":
print(solution(int(input().strip())))
| 647
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_lowerCamelCase : Tuple = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class lowercase ( a ):
lowercase__ : Optional[Any] = """ernie_m"""
lowercase__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Optional[int] , _UpperCamelCase : int = 250_002 , _UpperCamelCase : int = 768 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 3_072 , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 514 , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : int = 1 , _UpperCamelCase : float = 1e-05 , _UpperCamelCase : int=None , _UpperCamelCase : int=False , _UpperCamelCase : int=0.0 , **_UpperCamelCase : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = is_decoder
SCREAMING_SNAKE_CASE = act_dropout
| 647
| 1
|
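# The generated values 2, 7, 26, 97, ... satisfy the Pell-like recurrence
# a(n) = 4 * a(n-1) - a(n-2); each term yields a candidate perimeter 2 * value + 2
# or 2 * value - 2, summed up to the limit (the structure matches Project Euler
# problem 94 on almost equilateral triangles with integral area).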
def __lowerCamelCase (UpperCAmelCase__ : int = 1_0**9 ):
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 647
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Optional[int] = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
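# backend-specific symbols are appended to _import_structure below only when the
# corresponding framework is importable; _LazyModule defers the real imports
# until the attributes are first accessed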
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 647
| 1
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase : Any = logging.get_logger(__name__)
class lowercase ( a ):
def __init__( self : str , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) -> None:
'''simple docstring'''
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 647
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_lowerCamelCase : Optional[Any] = TypeVar('''T''')
class lowercase ( Generic[T] ):
def __init__( self : Any , _UpperCamelCase : T ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = data
SCREAMING_SNAKE_CASE = None
def __str__( self : Union[str, Any] ) -> str:
'''simple docstring'''
return F"{self.data}"
class lowercase ( Generic[T] ):
def __init__( self : Optional[int] ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
def __iter__( self : str ) -> Iterator[T]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.top
while node:
yield node.data
SCREAMING_SNAKE_CASE = node.next
def __str__( self : int ) -> str:
'''simple docstring'''
return "->".join([str(_UpperCamelCase ) for item in self] )
def __len__( self : Tuple ) -> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def __snake_case( self : Union[str, Any] ) -> bool:
'''simple docstring'''
return self.top is None
def __snake_case( self : str , _UpperCamelCase : T ) -> None:
'''simple docstring'''
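        # wrap the data in a node, point it at the current top (if any), and
        # make it the new top of the stack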
SCREAMING_SNAKE_CASE = Node(_UpperCamelCase )
if not self.is_empty():
SCREAMING_SNAKE_CASE = self.top
SCREAMING_SNAKE_CASE = node
def __snake_case( self : Union[str, Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
        assert isinstance(self.top , Node )
SCREAMING_SNAKE_CASE = self.top
SCREAMING_SNAKE_CASE = self.top.next
return pop_node.data
def __snake_case( self : Union[str, Any] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def __snake_case( self : Dict ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 647
| 1
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowercase ( unittest.TestCase ):
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __snake_case( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
| 647
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_lowerCamelCase : List[Any] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 647
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
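

if __name__ == "__main__":
    # Minimal usage sketch (added, not part of the original file): build a small
    # config and inspect the dynamic ONNX input axes exposed by the class above.
    config = RobertaConfig(vocab_size=1_000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    onnx_config = RobertaOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict mapping input names to {axis_index: axis_name}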
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product obtainable from a contiguous subarray of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
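

if __name__ == "__main__":
    # Added usage sketch for the function above.
    print(max_product_subarray([2, 3, -2, 4]))  # -> 6 (subarray [2, 3])
    print(max_product_subarray([-2, 0, -1]))  # -> 0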
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
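

# Added sketch (single-process illustration, not accelerate's implementation): what
# `pad_across_processes` achieves is that each rank's tensor is padded with zeros at
# the end (or at the front with pad_first=True) up to the largest dim-0 across ranks.
def _pad_to(t, target_dim0, pad_first=False):
    pad = torch.zeros((target_dim0 - t.shape[0],) + t.shape[1:], dtype=t.dtype, device=t.device)
    return torch.cat([pad, t] if pad_first else [t, pad], dim=0)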
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True


def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
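

if __name__ == "__main__":
    # Added usage sketch of the public helpers defined above.
    set_verbosity_info()
    logger = get_logger("transformers.demo")
    logger.info("effective verbosity: %s", get_verbosity())
    disable_progress_bars()
    assert not is_progress_bar_enabled()
    enable_progress_bars()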
def simplify(current_set: list[list]) -> list[list]:
    """One Gaussian-elimination step: normalise rows, cancel the leading term, recurse."""
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n lists of n+1 coefficients."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase : Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
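    # Added note: each equation i above reads x_i + (x_1 + ... + x_5) = 3 + i, so the
    # total S = x_1 + ... + x_5 satisfies S + 5*S = 30, i.e. S = 5 and x_i = i - 2.
    # The first call should therefore print [-1.0, 0.0, 1.0, 2.0, 3.0]; the single
    # equation 4*x = 2 gives [0.5].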
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
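

if __name__ == "__main__":
    # Added sketch (illustrative only, not transformers' _LazyModule): the lazy pattern
    # above defers submodule imports until an attribute is first accessed, so importing
    # the package does not pull in heavy dependencies such as torch.
    import importlib
    import types

    class TinyLazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            self._import_structure = import_structure

        def __getattr__(self, item):
            # Import the owning submodule only on first access to one of its names.
            for submodule, names in self._import_structure.items():
                if item in names:
                    module = importlib.import_module(submodule)
                    return getattr(module, item)
            raise AttributeError(item)

    demo = TinyLazyModule("demo", {"math": ["sqrt"]})
    print(demo.sqrt(9.0))  # `math` is imported only now -> 3.0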
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
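

if __name__ == "__main__":
    # Added usage sketch (downloads the public checkpoint exercised by the slow tests
    # above; requires network access and the fixture image path to exist):
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/vit-hybrid-base-bit-384")
    print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])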
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (lat, lon) points, WGS84 radius."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
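

if __name__ == "__main__":
    # Added usage sketch: San Francisco -> Yosemite Valley (lat/lon in degrees).
    # The result is in metres; for these two points it comes out to roughly 254 km.
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters")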
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
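

if __name__ == "__main__":
    # Added usage sketch (assumes Pillow is installed; OCR is disabled so pytesseract
    # is not required). A dummy image is resized to the default 224x224, flipped to
    # BGR, and returned channel-first.
    import numpy as np

    processor = LayoutLMv2ImageProcessor(apply_ocr=False)
    dummy = np.zeros((64, 64, 3), dtype=np.uint8)
    batch = processor(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # -> (1, 3, 224, 224)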
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
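

# Added sketch (a toy model of the arithmetic only, not accelerate's implementation):
# the batch sizes asserted above follow from simple shard arithmetic. With
# even_batches, the dataset is padded (by duplicating samples) up to a multiple of
# the process count before being split; without it, ranks simply get uneven shares.
def _expected_shard_sizes(dataset_size: int, batch_size: int, num_procs: int, even: bool):
    import math

    total = math.ceil(dataset_size / num_procs) * num_procs if even else dataset_size
    per_rank = [total // num_procs + (1 if r < total % num_procs else 0) for r in range(num_procs)]
    return [
        [batch_size] * (n // batch_size) + ([n % batch_size] if n % batch_size else [])
        for n in per_rank
    ]
    # _expected_shard_sizes(3, 1, 2, even=False) -> [[1, 1], [1]]
    # _expected_shard_sizes(7, 2, 2, even=False) -> [[2, 2], [2, 1]]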
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width when providing images to the image
        processor, assuming do_resize is set to True with a scalar size."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"image_id": 39_769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor()
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
@slow
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
SCREAMING_SNAKE_CASE = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
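
# Illustrative sketch of the mapping above (the key below is made up for
# illustration, not taken from a real checkpoint): the layer index moves into
# "block/{i}/layer/{j}" and the short projection names become the single
# letters used by HF.
#
#   rename_keys({"encoder/layers_0/attention/key/kernel": w})
#   # -> {"encoder/block/0/layer/0/SelfAttention/k/kernel": w}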
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
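
# Sketch of what convert_gin_to_config consumes (the gin lines below are
# invented for illustration): a file containing
#
#   NUM_ENCODER_LAYERS = 12
#   NUM_HEADS = 12
#   dense.MlpBlock.activations = ('gelu',)
#
# would be parsed into {"num_layers": 12, "num_heads": 12,
# "feed_forward_proj": "gelu"} (plus the num_experts passed in) before
# building the SwitchTransformersConfig.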
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
_lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
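
# Example invocation (the script name and all paths are placeholders):
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model/config.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8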
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowercase ( a ):
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : float , **_UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = feature_size
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = padding_value
SCREAMING_SNAKE_CASE = kwargs.pop("padding_side" , "right" )
SCREAMING_SNAKE_CASE = kwargs.pop("return_attention_mask" , _UpperCamelCase )
super().__init__(**_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , _UpperCamelCase : Union[bool, str, PaddingStrategy] = True , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
'''simple docstring'''
if isinstance(_UpperCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
SCREAMING_SNAKE_CASE = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_UpperCamelCase ) == 0:
if return_attention_mask:
SCREAMING_SNAKE_CASE = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
SCREAMING_SNAKE_CASE = required_input[0]
if isinstance(_UpperCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
SCREAMING_SNAKE_CASE = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "tf"
elif is_torch_tensor(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = "pt"
elif isinstance(_UpperCamelCase , (int, float, list, tuple, np.ndarray) ):
SCREAMING_SNAKE_CASE = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(_UpperCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
SCREAMING_SNAKE_CASE = to_numpy(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = [to_numpy(_UpperCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
SCREAMING_SNAKE_CASE = self._get_padding_strategies(padding=_UpperCamelCase , max_length=_UpperCamelCase )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if not all(len(_UpperCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
SCREAMING_SNAKE_CASE = []
for i in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = {k: v[i] for k, v in processed_features.items()}
# truncation
SCREAMING_SNAKE_CASE = self._truncate(
_UpperCamelCase , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , truncation=_UpperCamelCase , )
truncated_inputs.append(_UpperCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
SCREAMING_SNAKE_CASE = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE = {}
for i in range(_UpperCamelCase ):
# padding
SCREAMING_SNAKE_CASE = self._pad(
truncated_inputs[i] , max_length=_UpperCamelCase , padding_strategy=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
SCREAMING_SNAKE_CASE = []
                if value.dtype is np.dtype(np.float64 ):
                    SCREAMING_SNAKE_CASE = value.astype(np.float32 )
batch_outputs[key].append(_UpperCamelCase )
return BatchFeature(_UpperCamelCase , tensor_type=_UpperCamelCase )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_UpperCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            SCREAMING_SNAKE_CASE = np.ones(len(_UpperCamelCase ) , dtype=np.int32 )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = max_length - len(_UpperCamelCase )
if self.padding_side == "right":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (0, difference) )
SCREAMING_SNAKE_CASE = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
SCREAMING_SNAKE_CASE = np.pad(
processed_features["attention_mask"] , (difference, 0) )
SCREAMING_SNAKE_CASE = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
SCREAMING_SNAKE_CASE = np.pad(
_UpperCamelCase , _UpperCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def __snake_case( self : Dict , _UpperCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> Optional[int]:
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
SCREAMING_SNAKE_CASE = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) > max_length
if needs_to_be_truncated:
SCREAMING_SNAKE_CASE = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
SCREAMING_SNAKE_CASE = processed_features["attention_mask"][:max_length]
return processed_features
def __snake_case( self : Optional[Any] , _UpperCamelCase : int=False , _UpperCamelCase : Tuple=None ) -> Tuple:
'''simple docstring'''
if padding is not False:
if padding is True:
SCREAMING_SNAKE_CASE = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = PaddingStrategy(_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = padding
else:
SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
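
# Minimal usage sketch. The class above mirrors transformers'
# SequenceFeatureExtractor, whose public padding entry point is the `pad`
# method; the subclass and the values below are hypothetical:
#
#   class ToyExtractor(SequenceFeatureExtractor):
#       model_input_names = ["input_values"]
#
#   fe = ToyExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
#   batch = fe.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#                  padding="longest", return_tensors="np")
#   # batch["input_values"].shape == (2, 3): the shorter sequence is
#   # right-padded with 0.0 and masked out in batch["attention_mask"].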
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class lowercase ( a ):
def __snake_case( self : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_UpperCamelCase , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(_UpperCamelCase , "num_encoder_blocks" ) )
class lowercase :
def __init__( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[int]=13 , _UpperCamelCase : Union[str, Any]=64 , _UpperCamelCase : Optional[int]=3 , _UpperCamelCase : Optional[int]=4 , _UpperCamelCase : Dict=[2, 2, 2, 2] , _UpperCamelCase : Union[str, Any]=[8, 4, 2, 1] , _UpperCamelCase : Optional[int]=[16, 32, 64, 128] , _UpperCamelCase : Union[str, Any]=[1, 4, 8, 16] , _UpperCamelCase : List[str]=[1, 2, 4, 8] , _UpperCamelCase : Dict=True , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Tuple=3 , _UpperCamelCase : Optional[int]=None , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = num_encoder_blocks
SCREAMING_SNAKE_CASE = sr_ratios
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = downsampling_rates
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = scope
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __snake_case( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SegformerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def __snake_case( self : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : str = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
lowercase__ : Any = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ : Optional[Any] = True
lowercase__ : Union[str, Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : int = False
def __snake_case( self : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SegformerModelTester(self )
SCREAMING_SNAKE_CASE = SegformerConfigTester(self , config_class=_UpperCamelCase )
def __snake_case( self : Any ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_UpperCamelCase )
def __snake_case( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_UpperCamelCase )
@unittest.skip("SegFormer does not use inputs_embeds" )
def __snake_case( self : Any ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
pass
def __snake_case( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.attentions
SCREAMING_SNAKE_CASE = sum(self.model_tester.depths )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE = (self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
SCREAMING_SNAKE_CASE = (self.model_tester.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE = (self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __snake_case( self : Any ) -> Optional[int]:
'''simple docstring'''
def check_hidden_states_output(_UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = self.model_tester.num_encoder_blocks
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __snake_case( self : int ) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCamelCase ):
continue
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
pass
@slow
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SegformerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def __snake_case( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase )
SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoded_inputs.pixel_values.to(_UpperCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def __snake_case( self : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase )
SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoded_inputs.pixel_values.to(_UpperCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-1 ) )
@slow
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase )
SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = encoded_inputs.pixel_values.to(_UpperCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
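# Note on the post-processing exercised above: post_process_semantic_segmentation
# resizes the (1, num_labels, 128, 128) logits to each requested target size
# (or leaves them at the model's output resolution when target_sizes is omitted)
# and argmaxes over the label dimension, so each segmentation[i] is a 2-D map
# of class ids.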
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
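
# Quick sanity check (the classic example for this problem, with 1-day/7-day/
# 30-day pass prices):
#   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])  # -> 11
#   (a 7-day pass bought on day 1 covers days 1-7; days 8 and 20 each take a
#   1-day pass, so the total is 7 + 2 + 2 = 11)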
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCamelCase : str = logging.getLogger(__name__)
@dataclass
class lowercase :
lowercase__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase__ : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
lowercase__ : Optional[str] = field(
default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase__ : bool = field(default=a , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase__ : Optional[str] = field(
default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowercase :
lowercase__ : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
lowercase__ : Optional[str] = field(
default=a , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
lowercase__ : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowercase__ : bool = field(
default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
SCREAMING_SNAKE_CASE = import_module("tasks" )
try:
SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase__ , model_args.task_type )
SCREAMING_SNAKE_CASE = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
SCREAMING_SNAKE_CASE = token_classification_task.get_labels(data_args.labels )
SCREAMING_SNAKE_CASE = dict(enumerate(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , id2label=UpperCAmelCase__ , label2id={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
SCREAMING_SNAKE_CASE = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray ) -> Tuple[List[int], List[int]]:
SCREAMING_SNAKE_CASE = np.argmax(UpperCAmelCase__ , axis=2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = preds.shape
SCREAMING_SNAKE_CASE = [[] for _ in range(UpperCAmelCase__ )]
SCREAMING_SNAKE_CASE = [[] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(UpperCAmelCase__ : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
}
# Data collator
    SCREAMING_SNAKE_CASE = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE = trainer.evaluate()
SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
results.update(UpperCAmelCase__ )
# Predict
if training_args.do_predict:
SCREAMING_SNAKE_CASE = TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = trainer.predict(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
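
# Typical invocation (paths and the model name below are placeholders):
#   python run_ner.py \
#       --model_name_or_path bert-base-cased \
#       --data_dir ./conll2003 --labels ./labels.txt \
#       --output_dir ./ner-out --do_train --do_eval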
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
def solution() -> int:
    return compute_nums(1)[0]
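
# solution() finds the smallest odd composite that cannot be written as
# prime + 2 * square (Goldbach's other conjecture). For Project Euler
# problem 46 the expected answer is 5777.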
if __name__ == "__main__":
print(f"""{solution() = }""")
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( a , unittest.TestCase ):
    lowercase__ : Dict = KandinskyV22InpaintPipeline
lowercase__ : Optional[int] = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
lowercase__ : Union[str, Any] = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
lowercase__ : Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase__ : int = False
@property
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
return 32
@property
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
return 32
@property
def __snake_case( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.time_input_dim
@property
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return 100
@property
def __snake_case( self : Any ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        SCREAMING_SNAKE_CASE = UNet2DConditionModel(**_UpperCamelCase )
return model
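    # Note: in_channels=9 follows the usual latent-inpainting layout, which is
    # assumed here to be 4 noisy latent channels + 4 masked-image latent
    # channels + 1 mask channel, all concatenated before the first convolution.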
@property
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.dummy_unet
SCREAMING_SNAKE_CASE = self.dummy_movq
SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __snake_case( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any]=0 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_UpperCamelCase )
# create init_image
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(_UpperCamelCase ) ).convert("RGB" ).resize((256, 256) )
# create mask
        SCREAMING_SNAKE_CASE = np.ones((64, 64) , dtype=np.float32 )
SCREAMING_SNAKE_CASE = 0
if str(_UpperCamelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
SCREAMING_SNAKE_CASE = {
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def __snake_case( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "cpu"
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) , return_dict=_UpperCamelCase , )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def __snake_case( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def __snake_case( self : Optional[int] ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        SCREAMING_SNAKE_CASE = np.ones((768, 768) , dtype=np.float32 )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = "a hat"
        SCREAMING_SNAKE_CASE = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
pipe_prior.to(_UpperCamelCase )
        SCREAMING_SNAKE_CASE = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE = pipeline.to(_UpperCamelCase )
pipeline.set_progress_bar_config(disable=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pipe_prior(
_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
SCREAMING_SNAKE_CASE = pipeline(
image=_UpperCamelCase , mask_image=_UpperCamelCase , image_embeds=_UpperCamelCase , negative_image_embeds=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=18 , _UpperCamelCase : Tuple=30 , _UpperCamelCase : Optional[int]=400 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=True , _UpperCamelCase : Optional[int]=[0.5, 0.5, 0.5] , _UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DPTImageProcessingTester(self )
@property
def __snake_case( self : List[Any] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def test_call_pil( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_numpy( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_pytorch( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
from math import factorial, radians
def maclaurin_sin(angle_in_degrees : float , accuracy : int = 1_8 , rounded_values_count : int = 1_0 ):
    # Wrap the angle into [0, 360) so the truncated series stays accurate.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__('''doctest''').testmod()
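# Added sketch (not part of the original file): expected behaviour of maclaurin_sin above.
#   maclaurin_sin(30.0)  -> 0.5
#   maclaurin_sin(-30.0) -> -0.5   # -30 degrees wraps to 330 degrees, whose sine is -0.5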
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
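# Added note (a sketch, not part of the conversion script): for hidden size H the fused
# qkv weight has shape (3H, H); rows [0:H] become the query projection, rows [H:2H] the
# key projection, and rows [-H:] the value projection, matching the slices above.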
def remove_classification_head_(state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict ):
    # projection head is used in the self-supervised pre-training in MSN;
    # for downstream tasks it is not needed.
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    config = ViTMSNConfig()
    config.num_labels = 1_0_0_0
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
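# Example invocation (a sketch; the output path is a placeholder):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small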
def solution(limit : int = 1_0_0_0_0_0_0 ):
    # Sieve of Eratosthenes restricted to odd numbers (plus 2).
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
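# Sanity check (a sketch): Project Euler 72's worked example counts 21 reduced proper
# fractions for denominators up to 8, so solution(8) == 21 should hold.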
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 5_12,
}
def load_vocab(vocab_file ):
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file , bos_token="[SEP]" , eos_token="[SEP]" , sep_token="[SEP]" , unk_token="[UNK]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10 ):
            tok = F"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    @property
    def vocab_size( self ):
        return len(self.sp_model ) + self.fairseq_offset

    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize( self , text: str ):
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
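# Hypothetical usage sketch (the model path is a placeholder; requires the sentencepiece
# package and a real SentencePiece model file):
#   tok = XLMProphetNetTokenizer("prophetnet.tokenizer")
#   ids = tok("Hello world")["input_ids"]   # a single sequence is terminated by [SEP]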
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def get_qformer_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        # create a list of random PIL images
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , GPTaTokenizer )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , PreTrainedTokenizerFast )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tokens = tokenizer(input_str , return_token_type_ids=False )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
import numpy as np
def power_iteration(input_matrix: np.ndarray , vector: np.ndarray , error_tol: float = 1e-12 , max_iterations: int = 1_0_0 , ):
    # Ensure the matrix is square.
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
    real_vector = np.array([4_1, 4, 2_0] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([4_1, 4, 2_0] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
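# Added illustration (a sketch): for the diagonal matrix [[2, 0], [0, 1]] with start
# vector [1, 1], power_iteration converges to the dominant eigenvalue 2 and an
# eigenvector close to [1, 0]:
#   power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))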
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
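# Example invocation (a sketch; the script filename and argument names are assumed to
# come from InitializationArguments and may differ):
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name my-codeparrot-init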
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor ):
    model_input_names = ["input_features", "is_longer"]
    def __init__( self , feature_size=64 , sampling_rate=48_000 , hop_length=480 , max_length_s=10 , fft_window_size=1_024 , padding_value=0.0 , return_attention_mask=False , frequency_min: float = 0 , frequency_max: float = 14_000 , top_db: int = None , truncation: str = "fusion" , padding: str = "repeatpad" , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale="htk" , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , )
    def to_dict( self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features( self , waveform: np.array , mel_filters: Optional[np.array] = None ) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel="dB" , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self , mel , total_frames , chunk_frames ):
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode="bilinear" , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _get_input_mel( self , waveform: np.array , max_length , truncation , padding ) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F"data_truncating {truncation} not implemented" )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation: str = None , padding: Optional[str] = None , max_length: Optional[int] = None , sampling_rate: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , List ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
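# Hypothetical usage sketch (assumes 48 kHz mono input, matching the defaults above):
#   fe = ClapFeatureExtractor()
#   feats = fe(np.zeros(48_000), sampling_rate=48_000, return_tensors="pt")
#   # in "fusion" mode, feats["input_features"] stacks 4 mel views per clip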
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin , PretrainedConfig ):
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.0_2 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [F"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
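# Minimal sketch (assumed usage): the derived channel dimension after the last stage.
#   cfg = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#   cfg.hidden_size  # 64 * 2**3 == 512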
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints , pytorch_dump_folder_path ):
    config = BertAbsConfig(
        temp_dir="." , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
    checkpoints = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(config , torch.device("cpu" ) , checkpoints )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("cpu" ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model" )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info("Make sure that the models' outputs are identical" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-." )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-." )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference ) )
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1e-3 )
    if are_identical:
        logging.info("all weights are equal up to 1e-3" )
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one." )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary" )
    torch.save(
        new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
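# Example invocation (a sketch; the script filename and both paths are placeholders):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs.pt --pytorch_dump_folder_path ./out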
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module ):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class OffloadTester(unittest.TestCase ):
    def test_offload_state_dict( self ):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict() )
            index_file = os.path.join(tmp_dir , "index.json" )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , F"{key}.dat" )
                self.assertTrue(os.path.isfile(weight_file ) )
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight( self ):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , "weight" , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , "weight.dat" )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {"weight": {"shape": [2, 3], "dtype": str(dtype ).split("." )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index["weight"] )
                self.assertTrue(torch.equal(weight , new_weight ) )

    def test_offload_weights_loader( self ):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , state_dict )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )

    def test_extract_submodules_state_dict( self ):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict , ["a.1", "a.2"] )
        self.assertDictEqual(extracted , {"a.1": 0, "a.2": 2} )
        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict , ["a.1", "a.2"] )
        self.assertDictEqual(extracted , {"a.1.a": 0, "a.2.a": 2} )
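# For reference (derived from the assertions above): offload_weight stores each tensor
# in a memory-mappable .dat file and records its metadata in the index, e.g.
#   {"weight": {"shape": [2, 3], "dtype": "float16"}}
# load_offloaded_weight reconstructs the tensor from exactly that shape/dtype entry.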
def sylvester(number : int ):
    assert isinstance(number , int ), F"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = F"The input value of [n={number}] has to be > 0"
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
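# Reference values: Sylvester's sequence begins 2, 3, 7, 43, 1807, ...
# (each term equals the product of all previous terms plus one).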
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig ):
    model_type = "segformer"

    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.0_2 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True." , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-4

    @property
    def default_onnx_opset( self ) -> int:
        return 12
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase ):
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )

    @require_multi_gpu
    def test_multi_gpu( self ):
        print(F"Found {torch.cuda.device_count()} devices." )
        cmd = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def test_multi_gpu_ops( self ):
        print(F"Found {torch.cuda.device_count()} devices." )
        cmd = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(F"Command: {cmd}" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def test_pad_across_processes( self ):
        cmd = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def test_distributed_data_loop( self ):
        print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
        cmd = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : str = Accelerator()
_lowerCamelCase : List[str] = (accelerator.state.process_index + 2, 10)
_lowerCamelCase : str = torch.randint(0, 10, shape).to(accelerator.device)
_lowerCamelCase : Optional[Any] = ''''''
_lowerCamelCase : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCamelCase : Any = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCamelCase : int = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
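# Usage sketch: the `__main__` block above is what `test_pad_across_processes`
# re-launches in worker processes. Assuming this file is saved as test_multigpu.py,
# it can also be exercised directly with:
#
#   torchrun --nproc_per_node=2 test_multigpu.py
#
# Each rank builds a tensor whose first dimension depends on its rank, so
# pad_across_processes must pad every rank up to num_processes + 1 rows.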
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
_lowerCamelCase : str = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
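# Illustrative cross-check (not part of the original module): the DP table above
# should agree with the closed form C(n) = C(2n, n) / (n + 1).
from math import comb


def catalan_closed_form(n: int) -> int:
    """nth Catalan number via the binomial-coefficient formula."""
    return comb(2 * n, n) // (n + 1)


assert catalan_numbers(10) == [catalan_closed_form(n) for n in range(11)]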
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
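# The tests above all exercise the same generic diffusers sampling loop. A minimal
# standalone sketch of that loop, with a stand-in denoiser where a trained UNet
# would normally go (shapes and step count are arbitrary):
import torch

from diffusers import KDPM2DiscreteScheduler


def denoiser(x, t):
    return torch.zeros_like(x)  # stand-in for a trained model


scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = denoiser(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample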
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
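# Quick sanity check against Python's built-in bin() (illustrative only):
for n in (0, 5, -37, 255):
    assert decimal_to_binary(n) == bin(n)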
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
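# A short sketch of what `attribute_map` buys you, assuming PretrainedConfig's
# usual attribute redirection: reads and writes through the mapped names are
# forwarded to the canonical attributes.
config = ErnieMConfig(classifier_dropout=0.2)
assert config.dropout == 0.2  # "dropout" resolves to classifier_dropout
config.num_classes = 5  # "num_classes" resolves to num_labels
assert config.num_labels == 5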
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
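# These tests are opt-in: the skipif guard above requires TEST_SAGEMAKER=True.
# A plausible invocation from the repository root (test path assumed):
#
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker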
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
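# The lazy-module pattern above defers the heavy framework imports until an
# attribute is first accessed. A minimal standalone sketch of the same idea
# (simplified; the real _LazyModule also handles submodule access and dir()):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name: str, attr_to_module: dict):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # attribute name -> module path

    def __getattr__(self, attr: str):
        # Only runs when normal attribute lookup fails, i.e. on first access.
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)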
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
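# The integration tests above are gated behind @slow. A plausible way to run them
# locally (test path assumed):
#
#   RUN_SLOW=1 python -m pytest tests/models/beit/test_modeling_beit.py -k Integration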
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # the value stored in the node
        self.next: Node[T] | None = None  # pointer to the next node

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
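# A short usage sketch of the stack above (illustrative only):
stack = LinkedStack[int]()
for value in (1, 2, 3):
    stack.push(value)
assert str(stack) == "3->2->1"
assert stack.pop() == 3
assert stack.peek() == 2
assert len(stack) == 2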
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow's C++ logging if TF gets imported indirectly
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)