| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
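# Illustrative way to run just this module from a transformers source
# checkout (the path is an assumption, not taken from this file):
#
#   python -m pytest tests/models/auto/test_modeling_tf_auto.py -q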
| code_codestyle: 257 |
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads=16, attention_head_dim=88, in_channels=None, num_layers=1, dropout=0.0, norm_num_groups=32, cross_attention_dim=None, attention_bias=False, sample_size=None, num_vector_embeds=None, activation_fn="geglu", num_embeds_ada_norm=None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
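# Hedged usage sketch (not part of the original file). Assuming `diffusers`
# and `torch` are installed, the module expects both condition streams
# concatenated along the sequence axis (77 + 257 tokens by default) and mixes
# the two branch outputs according to `mix_ratio`:
#
#   import torch
#   model = DualTransformer2DModel(
#       num_attention_heads=2, attention_head_dim=8, in_channels=4,
#       norm_num_groups=4, cross_attention_dim=16,
#   )
#   sample = torch.randn(1, 4, 8, 8)
#   conditions = torch.randn(1, 77 + 257, 16)
#   out = model(sample, conditions, return_dict=False)[0]  # same shape as `sample`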
| style_context_codestyle: 257 | label: 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Union[str, Any] = '''yolos'''
def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=[512, 864] , UpperCamelCase__=16 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=100 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
snake_case : List[str] = hidden_size
snake_case : Dict = num_hidden_layers
snake_case : str = num_attention_heads
snake_case : List[str] = intermediate_size
snake_case : Tuple = hidden_act
snake_case : Dict = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : List[str] = initializer_range
snake_case : Union[str, Any] = layer_norm_eps
snake_case : List[str] = image_size
snake_case : Tuple = patch_size
snake_case : Any = num_channels
snake_case : Optional[Any] = qkv_bias
snake_case : Optional[Any] = num_detection_tokens
snake_case : Union[str, Any] = use_mid_position_embeddings
snake_case : int = auxiliary_loss
# Hungarian matcher
snake_case : List[Any] = class_cost
snake_case : int = bbox_cost
snake_case : Any = giou_cost
# Loss coefficients
snake_case : List[Any] = bbox_loss_coefficient
snake_case : Optional[Any] = giou_loss_coefficient
snake_case : Dict = eos_coefficient
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : List[Any] = version.parse('''1.11''' )
@property
def lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCamelCase ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
return 12
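# Illustrative only (not in the original module): any argument left unset
# keeps the defaults above.
#
#   config = YolosConfig(hidden_size=192, num_hidden_layers=6, num_attention_heads=3)
#   assert config.model_type == "yolos"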
| code_codestyle: 359 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : List[str] = ['''image_processor''', '''tokenizer''']
__UpperCAmelCase : str = '''LayoutLMv2ImageProcessor'''
__UpperCAmelCase : Dict = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCamelCase__ , )
snake_case : Dict = kwargs.pop("feature_extractor" )
snake_case : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
snake_case : Any = self.image_processor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case : Optional[Any] = features["words"]
snake_case : Dict = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel values
snake_case : Dict = features.pop("pixel_values" )
if return_overflowing_tokens is True:
snake_case : Any = self.get_overflowing_images(UpperCamelCase__ , encoded_inputs["overflow_to_sample_mapping"] )
snake_case : str = images
return encoded_inputs
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F' {len(UpperCamelCase__ )} and {len(UpperCamelCase__ )}' )
return images_with_overflow
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase__ , )
return self.image_processor_class
@property
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase__ , )
return self.image_processor
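# Hedged usage sketch (not part of the original file); assumes the
# `microsoft/layoutxlm-base` checkpoint is reachable and Pillow is installed.
# With `apply_ocr=True` (the image processor default) the words and boxes come
# from OCR, so only the image needs to be passed:
#
#   from PIL import Image
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   print(list(encoding.keys()))  # input_ids, attention_mask, bbox, image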
| style_context_codestyle: 112 | label: 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42])
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42])
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
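# Minimal standalone illustration (not part of the test file):
# `HfArgumentParser` turns the dataclass fields above directly into CLI flags.
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#   )
#   print(example)  # BasicExample(foo=1, bar=0.5, baz='quux', flag=True)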
| code_codestyle: 174 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| style_context_codestyle: 257 | label: 0 |
from __future__ import annotations

from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        # We go deep on the right branch
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        # We go deep on the left branch
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
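# Quick interactive example (not in the original file):
#
#   t = BinarySearchTree()
#   t.insert(8, 3, 10, 1, 6)
#   print(t.get_min().value, t.get_max().value)   # 1 10
#   print([n.value for n in t.traversal_tree()])  # preorder: [8, 3, 1, 6, 10]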
| code_codestyle: 356 |
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
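# Example invocation (hypothetical script name and paths, for illustration only):
#
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-dump \
#       --hf_config facebook/mbart-large-cc25 --finetuned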
| style_context_codestyle: 286 | label: 0 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
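# Quick illustration (not in the original file): the fixed witness set above
# makes the test deterministic for every n below 3_317_044_064_679_887_385_961_981.
#
#   print(miller_rabin(97))   # True
#   print(miller_rabin(561))  # False (561 is a Carmichael number)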
| code_codestyle: 193 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # depth-first search producing a postorder of the vertices
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # depth-first search on the reversed graph to collect one component
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
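# Small illustrative check (not in the original file); the ordering of
# components and of vertices within a component depends on the DFS order:
#
#   print(strongly_connected_components(test_graph_1))  # SCCs {0, 1, 2}, {3}, {4}
#   print(strongly_connected_components(test_graph_2))  # SCCs {0, 1, 2}, {3, 4, 5}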
| style_context_codestyle: 193 | label: 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
def _snake_case ( _snake_case : Union[tf.Tensor, np.ndarray] ):
if isinstance(UpperCAmelCase__ , np.ndarray ):
return list(tensor.shape )
lowerCAmelCase : Tuple = tf.shape(UpperCAmelCase__ )
if tensor.shape == tf.TensorShape(UpperCAmelCase__ ):
return dynamic
lowerCAmelCase : Dict = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase__ )]
def _snake_case ( _snake_case : tf.Tensor , _snake_case : Optional[int] = None , _snake_case : Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase__ , name=UpperCAmelCase__ )
def _snake_case ( _snake_case : Any , _snake_case : List[str] , _snake_case : Dict , _snake_case : Tuple=1E-5 , _snake_case : List[str]=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowerCAmelCase : List[str] = tf.nn.moments(UpperCAmelCase__ , axes=[axis] , keepdims=UpperCAmelCase__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowerCAmelCase : List[Any] = [1] * inputs.shape.rank
lowerCAmelCase : List[str] = shape_list(UpperCAmelCase__ )[axis]
lowerCAmelCase : List[str] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase : List[Any] = tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
# Compute layer normalization using the batch_normalization
# function.
lowerCAmelCase : str = tf.nn.batch_normalization(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , offset=UpperCAmelCase__ , scale=UpperCAmelCase__ , variance_epsilon=UpperCAmelCase__ , )
return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional tensors into 2-dimensional ones, leaving other values untouched."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
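
# A minimal usage sketch (ours, not part of the original module). It assumes the
# module-level `import tensorflow as tf` / `import numpy as np` that this file's
# header (not shown in this excerpt) provides.
if __name__ == "__main__":
    t = tf.zeros((2, 3, 4))
    print(shape_list(t))               # [2, 3, 4] -- every dimension is static
    print(shape_list(np.zeros((5,))))  # [5]
    flat = flatten(t, start_dim=1)     # mirrors torch.flatten -> shape (2, 12)
    print(shape_list(flat))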
| 371
|
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to false and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be of type bool"
    return status


def sieve_er(n: int):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans


def get_prime_numbers(n: int):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends it to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans


def prime_factorization(number: int):
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans
def greatest_prime_factor(number: int):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans


def smallest_prime_factor(number: int):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be of type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be of type bool"
    return number % 2 != 0


def goldbach(number: int):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable, for breaking out of the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
def gcd(number1: int, number2: int):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be of type int and positive"
    return number1


def kg_v(number1: int, number2: int):
    """Least common multiple; 'kgV' is the German abbreviation."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captures numbers present in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be of type int and positive"
    return ans


def get_prime(n: int):
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and of type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2' !
    return ans


def get_divisors(n: int):
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # sums all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
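
# A short self-check sketch (our own sample values; the function names are the
# module's own):
if __name__ == "__main__":
    assert is_prime(97) and not is_prime(91)       # 91 = 7 * 13
    assert prime_factorization(287) == [7, 41]
    assert gcd(12, 18) == 6 and kg_v(8, 10) == 40  # lcm(8, 10) = 40
    assert goldbach(28) == [5, 23]                 # 28 = 5 + 23
    print("primelib sanity checks passed")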
| 314
| 0
|
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional projections, as used by Transformer-XL."""

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        """Return per-position negative log-likelihoods of `labels`, or full log-probs when `labels` is None."""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        """Computes log probabilities for all tokens in the vocabulary."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
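
# A minimal usage sketch (our own toy sizes, not from the original file): an
# adaptive softmax over a 1000-token vocabulary with cluster cutoffs at 200 and 600.
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[200, 600])
    hidden = torch.randn(16, 8, 32)           # (batch, seq_len, d_proj)
    labels = torch.randint(0, 1000, (16, 8))
    nll = crit(hidden, labels)                # per-position negative log-likelihoods
    print(nll.shape)                          # torch.Size([112]) == 16 * (8 - 1) after the next-token shift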
| 28
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    # Denoised frames of the generated video, as nested NumPy arrays or a torch tensor.
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
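# Typical downstream usage (hedged sketch mirroring the public diffusers examples;
# the model id is the published damo-vilab checkpoint):
#
#     import torch
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16)
#     frames = pipe("an astronaut riding a horse").frames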
| 302
| 0
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return the DFS finish order of vertices reachable from `vert`."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS finish order on `graph`, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
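
# Quick demo on the sample graphs defined at the top of this file (vertex order
# inside a component follows the reversed-graph DFS):
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]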
| 84
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Ratio between the velocity and the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x-axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("Example of four vector: ")
print(F"ct' = {four_vector[0]}")
print(F"x' = {four_vector[1]}")
print(F"y' = {four_vector[2]}")
print(F"z' = {four_vector[3]}")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
| 84
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
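# Example invocation (hypothetical file paths, shown for illustration only):
#
#     python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path mluke/pytorch_model.bin \
#         --metadata_path mluke/metadata.json \
#         --entity_vocab_path mluke/entity_vocab.jsonl \
#         --pytorch_dump_folder_path ./mluke-base \
#         --model_size base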
| 232
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that byte-level BPE cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    """Constructs a BART tokenizer, based on byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
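
# A minimal usage sketch (assumes the public "facebook/bart-base" checkpoint is
# reachable; the token strings shown are illustrative of byte-level BPE output):
#
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     ids = tokenizer("Hello world")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))  # ['<s>', 'Hello', 'Ġworld', '</s>']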
| 112
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 147
|
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 147
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
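
# These tests are collected by pytest; an illustrative invocation from a
# diffusers checkout (the exact test path is an assumption) would be:
#     pytest -k "stable_diffusion_inpaint" tests/pipelines/stable_diffusion_2/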
| 3
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
lowerCamelCase_ : List[str] = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Union[str, Any] = os.path.dirname(os.path.realpath(_UpperCAmelCase ) )
A_ : Tuple = os.path.join(_UpperCAmelCase , 'words.txt' )
A_ : List[Any] = ''
with open(_UpperCAmelCase ) as f:
A_ : int = f.readline()
A_ : Optional[Any] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
A_ : Dict = [
word
for word in [sum(ord(_UpperCAmelCase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(solution())
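# Worked example: "SKY" -> 19 + 11 + 25 = 55 = 10 * 11 / 2 = t(10), so "SKY" is
# a triangle word and contributes 1 to the count returned by solution().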
| 286
| 0
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer( BaseTokenizer ):
    def __init__( self , replacement = "▁" , add_prefix_space = True , unk_token = "<unk>" , eos_token = "</s>" , pad_token = "<pad>" , ):
        self.special_tokens = {
            '''pad''': {'''id''': 0, '''token''': pad_token},
            '''eos''': {'''id''': 1, '''token''': eos_token},
            '''unk''': {'''id''': 2, '''token''': unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['''id''']] = token_dict['''token''']
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=F'''$A {self.special_tokens['eos']['token']}''' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , )
        parameters = {
            '''model''': '''SentencePieceUnigram''',
            '''replacement''': replacement,
            '''add_prefix_space''': add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train( self , files , vocab_size = 8000 , show_progress = True , ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()
    def train_from_iterator( self , iterator , vocab_size = 8000 , show_progress = True , ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self ):
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json['''model''']['''unk_id'''] = self.special_tokens['''unk''']['''id''']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
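# Illustrative usage sketch (the tiny corpus is a stand-in, not part of this file):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train_from_iterator(["hello world", "goodbye world"], vocab_size=30)
#     tokenizer.encode("hello world").tokens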
| 366
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 281
| 0
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self , config , pixel_values , labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self , config , pixel_values , labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
    def test_multi_gpu_data_parallel_forward(self):
pass
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
pass
@unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
pass
    def test_model_common_attributes(self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature(self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _SCREAMING_SNAKE_CASE ( self : Tuple):
pass
    def check_hidden_states_output(self , inputs_dict , config , model_class , image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states) , expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
    def test_hidden_states_output_with_padding(self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
pass
    def test_model_outputs_equivalence(self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs)
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs).to_tuple()
                def recursive_check(tuple_object , dict_object):
                    if isinstance(tuple_object , (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object):
                            recursive_check(tuple_iterable_value , dict_iterable_value)
                    elif isinstance(tuple_object , Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values()):
                            recursive_check(tuple_iterable_value , dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object) , set_nan_tensor_to_zero(dict_object) , atol=1E-5) , msg=(
                                "Tuple and dict output are not equal. Difference:"
                                F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                F" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                F" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ) , )
                recursive_check(tuple_output , dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class)
            check_equivalence(model , tuple_inputs , dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            check_equivalence(model , tuple_inputs , dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class)
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest( unittest.TestCase , BackboneTesterMixin ):
    """simple docstring"""
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 13
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    '''simple docstring'''
    raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module ):
    """simple docstring"""
    def __init__( self ) -> int:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ) -> Tuple:
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class MemoryTest(unittest.TestCase ):
    """simple docstring"""
    def test_memory_implicit( self ) -> Dict:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
    def test_memory_explicit( self ) -> List[Any]:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1
        bs , arg1 = mock_training_loop_function('''hello''' )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arg1] , [8, '''hello'''] )
    def test_start_zero( self ) -> List[Any]:
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def test_approach_zero( self ) -> List[str]:
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def test_verbose_guard( self ) -> List[Any]:
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(128 , '''hello''' , '''world''' )
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
    def test_any_other_error( self ) -> int:
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError('''Oops, we had an error!''' )
        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
    @require_cuda
    def test_release_memory( self ) -> str:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
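# Illustrative usage sketch of the decorator under test (the body is a stand-in):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_function(batch_size):
#         ...  # build dataloaders/model with `batch_size`; a CUDA OOM triggers a retry at half the size
#     training_function()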
| 314
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=128_112 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
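# Illustrative usage sketch (argument values are examples):
#
#     config = NllbMoeConfig(num_experts=4, encoder_layers=2, decoder_layers=2)
#     config.num_experts  # -> 4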
| 168
|
"""simple docstring"""
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor ) -> Tuple:
    if hor == 1_28:
        down_block_types = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        block_out_channels = (32, 1_28, 2_56)
        up_block_types = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
    elif hor == 32:
        down_block_types = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        block_out_channels = (32, 64, 1_28, 2_56)
        up_block_types = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
    model = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
    state_dict = model.state_dict()
    config = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_55_36,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
    hf_value_function = UNet1DModel(**config )
    print(F'length of state dict: {len(state_dict.keys() )}' )
    print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
    with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' , """w""" ) as f:
        json.dump(config , f )
def value_function( ) -> Optional[int]:
    config = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 1_28, 2_56),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_55_36,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCamelCase = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCamelCase = model
lowerCamelCase = UNetaDModel(**snake_case__ )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
lowerCamelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 168
| 1
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
    def __init__( self , controlnets ) -> Optional[Any]:
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ) -> Optional[Any]:
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""
    @classmethod
    def from_pretrained( cls , pretrained_model_path , **kwargs ) -> List[Any]:
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""
        logger.info(f"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + "_0"}.""" )
        return cls(controlnets )
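# Illustrative usage sketch (checkpoint names are examples):
#
#     controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#     controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     multi = MultiControlNetModel([controlnet_pose, controlnet_canny])
#     multi.save_pretrained("mydirectory/controlnet")  # the second net lands in controlnet_1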
| 84
|
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image , level: float ) -> Image:
    '''simple docstring'''
    def brightness(c: int ) -> float:
        return 1_2_8 + level + (c - 1_2_8)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 1_00)
        brigt_img.save('image_data/lena_brightness.png', format='png')
| 84
| 1
|
'''simple docstring'''
def find_min(arr ):
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
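# Worked example: [1, 5, 11, 5] splits into {11} and {1, 5, 5}, both summing to
# 11, so the minimum subset-sum difference is 0.
if __name__ == "__main__":
    print(find_min([1, 5, 11, 5]))  # -> 0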
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col] ), row2) for row2 in range(col , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row] , augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1 , size ):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1 , size + 1 ):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col , size + 1 ):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(y_points: list[int] ) -> Callable[[int], int]:
    size: int = len(y_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322
| 1
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1_581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1_517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1_570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1_584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1_793
    _globals["_SELFTESTDATA"]._serialized_start = 1_795
    _globals["_SELFTESTDATA"]._serialized_end = 1_916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1_864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1_905
    _globals["_MODELPROTO"]._serialized_start = 1_919
    _globals["_MODELPROTO"]._serialized_end = 2_429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2_208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2_418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2_323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2_407
# @@protoc_insertion_point(module_scope)
| 147
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6 ) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list(self, initial_capacity: int ) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self ) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first(self ) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue(self, data: Any ) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue(self ) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation(self ) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""" )
    def check_is_full(self ) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class Node:
    def __init__(self ) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
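# Illustrative usage (behavior follows the methods above):
#
#     queue = CircularQueueLinkedList(initial_capacity=3)
#     queue.enqueue("a")
#     queue.enqueue("b")
#     queue.first()    # -> "a"
#     queue.dequeue()  # -> "a"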
| 147
| 1
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, 'jax.Array', Mapping] ):
    def __init__( self , features=None , device=None , **jnp_array_kwargs ):
        '''simple docstring'''
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                F"""Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` """
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                F"""Device with string identifier {self.device} not listed among the available """
                F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
                F"""device: {str(jax.devices()[0] )}.""" )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        '''simple docstring'''
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self , data_struct ):
        '''simple docstring'''
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
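# Illustrative usage sketch (assumes `datasets` and `jax` are installed):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
#     ds = ds.with_format("jax")
#     ds[0]["x"]  # -> a jax Array placed on the formatter's device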
| 140
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix , constant_matrix , init_val , iterations , ) -> list[float]:
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg )
    if cols2 != 1:
        msg = F"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            F"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            F"""matrix but received {len(init_val )} and {rows1}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table ) -> bool:
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
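# Worked example: for the strictly diagonally dominant system
# 4x + y = 2 and x + 3y = -1 (exact solution x = 7/11, y = -6/11),
# a few sweeps converge toward it:
#
#     coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#     constant = np.array([[2.0], [-1.0]])
#     jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5], iterations=3)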
| 140
| 1
|
class MaxFenwickTree:
    def __init__( self , size ) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next( index ) -> int:
        return index | (index + 1)
    @staticmethod
    def get_prev( index ) -> int:
        return (index & (index + 1)) - 1
    def update( self , index , value ) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index ) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(self.tree[index] , value )
            index = self.get_next(index )
    def query( self , left , right ) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right )
            if left <= current_left:
                result = max(result , self.tree[right] )
                right = current_left
            else:
                result = max(result , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
    import doctest

    doctest.testmod()
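
    # Example (illustrative values, not from the original file): track a running
    # maximum over 5 positions and query sub-ranges.
    tree = MaxFenwickTree(5)
    tree.update(1, 7)
    tree.update(3, 4)
    print(tree.query(0, 5))  # 7 -- max over [0, 5)
    print(tree.query(2, 5))  # 4 -- max over [2, 5)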
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict (k has no bias in the original model)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
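
# Example invocation (illustrative script name; requires the patched LAVIS
# install mentioned at the top of this file and enough memory for the chosen
# checkpoint):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b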
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
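
# How the lazy module behaves in practice (sketch; the exact import path of the
# MMBT package may differ between transformers versions):
#
#   from transformers.models.deprecated.mmbt import MMBTConfig  # resolved lazily
#
# Attribute access on the _LazyModule triggers the real submodule import, so the
# torch-dependent modeling code is only loaded when MMBTModel is first touched.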
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
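
# Quick check of the helper above (illustrative values): a 3-second clip at
# 16 kHz is cut down to a random 1-second window of exactly 16000 samples.
#
#   clip = np.zeros(3 * 16000, dtype=np.float32)
#   assert len(random_subsample(clip, max_length=1.0)) == 16000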
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main()
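
# Example invocation (illustrative hyperparameters; `superb`/`ks` is a real Hub
# dataset, but any audio-classification dataset with audio + label columns works):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ks --do_train --do_eval \
#       --learning_rate 3e-5 --max_length_seconds 1 \
#       --per_device_train_batch_size 32 --num_train_epochs 5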
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT weights into the Transformers design."""
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
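
# Example invocation (illustrative script name; the default URL converts the
# MLM+ITM checkpoint, and any of the vqa/nlvr/irtr checkpoint URLs from the
# ViLT releases page can be substituted):
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm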
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: sentencepiece_model.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder


# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. "
    B"\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by the protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
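
# Usage sketch (assumes this module is importable as `sentencepiece_model_pb2`,
# as in the builder call above, and that `spiece.model` is a local SentencePiece
# model file):
#
#   from sentencepiece_model_pb2 import ModelProto
#
#   m = ModelProto()
#   m.ParseFromString(open("spiece.model", "rb").read())
#   print(m.trainer_spec.model_type, len(m.pieces))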
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
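
# To run this test module (sketch; the path assumes the transformers repo
# layout, and the slow test downloads the checkpoint from the Hub):
#
#   RUN_SLOW=1 python -m pytest tests/models/deberta_v2/test_modeling_tf_deberta_v2.py -v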
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Point the docs' custom.js at a new stable release version."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
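
# Example invocation (illustrative script name; run from the repository root so
# the relative docs path resolves):
#
#   python update_custom_js.py --version 4.31.0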
def find_min(arr):
    """Partition arr into two subsets whose sums differ as little as possible; return that difference."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True if some subset of the first i elements sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the best split has one half as close to s/2 as possible
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
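
# Example (illustrative): [1, 6, 11, 5] splits into {1, 5, 6} and {11}, so the
# minimum achievable difference between the two subset sums is 1.
#
#   print(find_min([1, 6, 11, 5]))  # 1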
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxAlbertModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = FlaxAlbertModel.from_pretrained('albert-base-v2' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 322
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__A : int = logging.get_logger(__name__)
class OwlViTFeatureExtractor( OwlViTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 8
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
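# A minimal sketch of trim_batch on toy tensors (pad_token_id of 0 is an
# assumption for the example): columns that are padding in every row are
# dropped, while partially padded columns are kept.
#     batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#     trim_batch(batch, pad_token_id=0)  # -> tensor([[5, 6], [7, 0]])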
class Seq2SeqDataset( Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__( self ):
        return len(self.src_lens )
    def __getitem__( self , index ):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, F'''empty source line for index {index}'''
        assert tgt_line, F'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens( data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
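# A minimal sketch of wiring this dataset into a DataLoader (the data_dir and
# checkpoint below are placeholder assumptions, not values from this file):
#     from torch.utils.data import DataLoader
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     train_ds = Seq2SeqDataset(tokenizer, "data_dir", max_source_length=1024, max_target_length=56)
#     loader = DataLoader(train_ds, batch_size=8, collate_fn=train_ds.collate_fn)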
__A : List[Any] = getLogger(__name__)
def flatten_list( summary_ids: List[List] ):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path: str ):
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    '''simple docstring'''
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )
def get_git_info( ):
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f: Callable , x: Iterable ):
    '''simple docstring'''
    return list(map(f , x ) )
def pickle_save( obj , path ):
    '''simple docstring'''
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s ):
    '''simple docstring'''
    def remove_articles( text ):
        return re.sub(R"""\b(a|an|the)\b""" , """ """ , text )
    def white_space_fix( text ):
        return " ".join(text.split() )
    def remove_punc( text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower( text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def fa_score( prediction , ground_truth ):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
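# Worked example for the token-level F1 above: "the cat sat" vs "a cat sat down"
# normalize to "cat sat" and "cat sat down", so precision = 2/2, recall = 2/3 and
# F1 = 2 * 1 * (2/3) / (1 + 2/3) = 0.8.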
def exact_match_score( prediction , ground_truth ):
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    '''simple docstring'''
    return model_prefix.startswith("""rag""" )
def set_extra_model_params( extra_params , hparams , config ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 8
| 1
|
def solution( power: int = 1000 ) -> int:
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
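# Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15)
# returns 26; the default solution(1000) yields the digit sum of 2**1000
# (Project Euler problem 16).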
| 140
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 140
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
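# A sketch of what this lazy-import pattern buys (illustrative, not an exact
# trace): importing the package module is cheap, and the heavy modeling file is
# only loaded the first time one of the exported names is accessed.
#     import transformers.models.pegasus_x as pegasus_x  # no torch-side import yet
#     cls = pegasus_x.PegasusXModel  # modeling_pegasus_x is imported here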
| 266
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 266
| 1
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models( ):
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
        qar_model = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
        save_dict = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
        sas_model.load_state_dict(save_dict["""model"""] )
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes( ):
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )['''train''']
        wikiaab_passage_reps = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 1_28) , )
        wikiaab_index_flat = faiss.IndexFlatIP(1_28 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data( ):
    '''simple docstring'''
    elia = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
    elia_train = elia['''train_eli5''']
    elia_train_q_reps = np.memmap(
        """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 1_28) )
    eli5_train_q_index = faiss.IndexFlatIP(1_28 )
    eli5_train_q_index.add(elia_train_q_reps )
    return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training( question , n_results=10 ):
    '''simple docstring'''
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D , I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def make_support( question , source="""wiki40b""" , method="""dense""" , n_results=10 ):
    '''simple docstring'''
    if source == "none":
        support_doc , hit_lst = (''' <P> '''.join(["""""" for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc , hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wikiaab_passages , wikiaab_gpu_index_flat , n_results )
        else:
            support_doc , hit_lst = query_es_index(
                question , es_client , index_name="""english_wiki40b_snippets_100w""" , n_results=n_results , )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question , support_doc )
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _ : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _ : None),
    } )
def answer_question( question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=2_56 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    '''simple docstring'''
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=10_24 , device="""cuda:0""" , )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder's output probabilities.\n    "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 165
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFBlenderbotModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.inta )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFBlenderbotModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase ):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer( self ):
        """simple docstring"""
        return BlenderbotTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        """simple docstring"""
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_generation_from_long_input( self ):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 125
| 0
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason="""MRA does not output attentions""" )
    def test_attention_outputs( self ):
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_masked_lm( self ):
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_masked_lm_long_input( self ):
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        input_ids = torch.arange(4096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 126
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
| 126
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
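# Quick illustration (not part of the original script) of how a raw GroupViT
# checkpoint key flows through the replacement chain above:
#   rename_key("img_encoder.layers.0.blocks.1.attn.proj.weight")
#   -> "vision_model.encoder.stages.0.layers.1.self_attn.out_proj.weight"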
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
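# Example invocation (the script and checkpoint file names are placeholders):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path group_vit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit \
#       --model_name groupvit-gcc-yfcc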
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
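# Minimal usage sketch (illustrative, not part of the original file): unknown
# keyword arguments flow through **kwargs to PretrainedConfig, and
# pooler_hidden_size falls back to hidden_size when not given explicitly.
#   config = DebertaV2Config(hidden_size=768, num_hidden_layers=12)
#   assert config.pooler_hidden_size == 768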
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Counts the semiprimes (products of exactly two primes) below max_number."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
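# Worked example (illustrative): for max_number=30 the semiprimes are
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so solution(30) == 10. The
# two-pointer scan counts, for each prime p at `left`, how many primes
# q >= p (up to `right`) keep p * q below max_number.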
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def permute(nums):
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
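# Both implementations enumerate the same n! permutations, possibly in a
# different order, e.g. for [1, 2, 3]:
#   sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))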
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
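# Example invocation (the script name and all argument values are placeholders):
#   python pretokenizing.py \
#       --tokenizer_dir my-org/my-tokenizer \
#       --dataset_name my-org/my-raw-dataset \
#       --tokenized_data_repo my-org/my-tokenized-dataset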
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
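# What these cases pin down: get_imports should report unconditional top-level
# and function-level imports ("os" in every snippet) while skipping imports
# guarded by try/except, which are treated as optional dependencies.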
def decimal_to_fraction(decimal):
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce the fraction with the Euclidean algorithm.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
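# Expected behaviour of the demo above: whole numbers map to (n, 1), and the
# Euclidean reduction yields e.g.
#   decimal_to_fraction(1.5)    -> (3, 2)
#   decimal_to_fraction("6.25") -> (25, 4)
# while the malformed input "78td" raises ValueError.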
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
from __future__ import annotations
def solution(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
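# Illustrative run (not in the original): 0 marks a free cell, 1 a blocked one.
#   solution([[0, 1], [0, 0]])
# prints the visited path
#   [1, 0]
#   [1, 1]
# and returns True.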
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
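# Usage sketch (illustrative; the class name above is inferred from the
# size_divisor/rescale behaviour): the processor only snaps spatial dims down
# to a multiple of size_divisor and rescales pixel values to [0, 1]; it does
# not normalize.
#   processor = GLPNImageProcessor(size_divisor=32)
#   batch = processor.preprocess(images=image, return_tensors="pt")  # image: a PIL.Image
#   batch["pixel_values"].shape  # (1, 3, H // 32 * 32, W // 32 * 32)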
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
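# Example launch (the script name and data path are placeholders):
#   accelerate launch cv_example.py --data_dir images --with_tracking --checkpointing_steps epoch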
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
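# The update rule in __judge_point is Conway's B3/S23: a live cell survives
# with exactly 2 or 3 live neighbours and dies otherwise; a dead cell becomes
# alive with exactly 3 live neighbours.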
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def snake_case( ) -> int:
'''simple docstring'''
lowercase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , __lowerCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase : List[Any] = training_args.get_process_log_level()
logger.setLevel(__lowerCamelCase )
datasets.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowercase : Tuple = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase : List[Any] = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowercase : str = train_dataset.features["""label"""].names
if training_args.do_eval:
lowercase : Any = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowercase : str = eval_dataset.features["""label"""].names
if training_args.do_predict:
lowercase : Optional[Any] = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowercase : List[str] = predict_dataset.features["""label"""].names
# Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", )
if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", )
# Get the metric function
    metric = evaluate.load("xnli")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
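
# A minimal sketch of how this script is typically invoked; the checkpoint name,
# languages, and output path below are illustrative assumptions, not values taken
# from this file:
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --output_dir /tmp/debug_xnli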
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    r"""
    Constructs an MCTCT processor which wraps an MCTCT feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
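
# A minimal usage sketch; the checkpoint name and the 16 kHz rate are assumptions
# for illustration, not taken from this file:
#
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
#   # `batch` holds the extracted input features plus the tokenized labels.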
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio feature that stores audio samples as {"bytes": ..., "path": ...} structs."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32_767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32_767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
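
# A minimal sketch of using this feature with a dataset; the file path is an
# illustrative assumption:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"audio": ["path/to/sample.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}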
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum full adder for two input bits and a carry-in."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
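
# A minimal sketch of the expected input shapes, matching how this script indexes the
# JSON (the contents are illustrative):
#
#   data.json: {"data": [{"paragraphs": [{"qas": [{"id": "q1", "answers": {"text": ["Denver"]}}]}]}]}
#   pred.json: {"q1": "Denver"}
#
#   python evaluate-v2.0.py data.json pred.json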
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(100, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    r"""
    Constructs a Pix2Struct processor which wraps a T5 tokenizer and a Pix2Struct image processor into a single
    processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, max_patches=2048, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
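
# A minimal usage sketch; the checkpoint name is an assumption for illustration:
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=image, text="A caption", return_tensors="pt")
#   # `inputs` contains flattened patches plus decoder_input_ids/decoder_attention_mask.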
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    r"""
    Constructs an MGP-STR processor which wraps an image processor and MGP-STR tokenizers into a single processor.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        # Decode char, bpe and wordpiece logits, then keep the highest-confidence string per sample.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
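
# A minimal usage sketch; the checkpoint name is the published MGP-STR base model,
# treated here as an assumption for illustration:
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)  # an MgpstrForSceneTextRecognition model
#   text = processor.batch_decode(outputs.logits)["generated_text"]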
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False')) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
])
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)] )
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Return the prime factors of ``n`` in non-decreasing order.

    >>> prime_factors(12)
    [2, 2, 3]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Optional[Any] = ["Adafactor", "global_step"]
for name, shape in tqdm(SCREAMING_SNAKE_CASE__ , desc="converting tf checkpoint to dict" ):
_UpperCAmelCase : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase : int = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Dict = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
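
# A minimal sketch of invoking this converter; the checkpoint path mirrors the default
# above, and the output directory is an illustrative assumption:
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc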
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
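# Worked example for compute_f1 above (illustrative, not part of the official
# script): for gold "the cat sat" and prediction "a cat sat down", the articles
# are stripped by normalize_answer, leaving gold tokens [cat, sat] and
# prediction tokens [cat, sat, down]; precision = 2/3, recall = 2/2, so
# F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.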
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
__a = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
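# Typical invocation of the script above (file names are illustrative, not
# mandated by the script):
#   python evaluate-v2.0.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json
# The resulting JSON contains "exact", "f1" and "total", plus "HasAns_*" /
# "NoAns_*" breakdowns and, when --na-prob-file is given, "best_exact" /
# "best_f1" with their optimal no-answer thresholds.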
| 145
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__a = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
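# Minimal demonstration of the deprecation shim above (illustrative; any
# OWL-ViT image-processor kwargs behave the same way):
if __name__ == "__main__":
    import warnings as _warnings

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        OwlViTFeatureExtractor()  # constructs an OwlViTImageProcessor under the hood
    assert any(issubclass(w.category, FutureWarning) for w in caught)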
| 145
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = '''    def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_localized_md_list)
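# Illustration of the convention the tests above enforce (hypothetical class
# names, added for exposition): a block annotated with "# Copied from" must
# stay identical, modulo the declared renames, to the class it points at:
#
# # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
# class TestModelLMPredictionHead(nn.Module):
#     ...
#
# utils/check_copies.py re-derives the expected body and flags any drift.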
| 362
|
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for different number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle for the given number of rows."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle by exploiting the symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
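# Illustrative consistency check (added for exposition, not in the original
# module): the naive and the optimized generator agree on small inputs.
assert generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4) == [
    [1],
    [1, 1],
    [1, 2, 1],
    [1, 3, 3, 1],
]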
def benchmark() -> None:
    """Benchmark the two Pascal triangle generators."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 142
| 0
|
"""simple docstring"""
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
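# Sketch of how a pin table like this is typically consumed when building
# setup.py extras (the helper name `deps_list` mirrors the one in diffusers'
# setup.py; treat this block as illustrative):
def deps_list(*pkgs):
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]


if __name__ == "__main__":
    print(deps_list("torch", "transformers"))  # ['torch>=1.4', 'transformers>=4.25.1']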
| 144
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
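# Typical invocation (paths are illustrative; assuming this file is saved as
# convert_hubert_original_pytorch_checkpoint_to_pytorch.py):
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned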
| 43
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
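# Illustrative call of get_tfds above (the CSV file names and tokenizer
# checkpoint are assumptions for the example, not taken from this script):
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
# train_ds, val_ds, test_ds, label2id = get_tfds(
#     train_file="train.csv", eval_file="dev.csv", test_file=None,
#     tokenizer=tokenizer, label_column_id=0, max_seq_length=128,
# )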
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)

    return results


if __name__ == "__main__":
    main()
| 135
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 135
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
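# Quick usage sketch for the two classes above (added for illustration; the
# values are just the defaults from __init__):
# config = CamembertConfig()                 # camembert-base style hyperparameters
# assert config.hidden_size == 768 and config.num_attention_heads == 12
# onnx_config = CamembertOnnxConfig(config)
# list(onnx_config.inputs)                   # ['input_ids', 'attention_mask']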
| 167
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 167
| 1
|
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
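# Usage sketch (beyond the tests above): `offline` is an ordinary context
# manager, so it can also gate ad-hoc checks that calling code degrades
# gracefully without network access. The branch below is illustrative, not
# part of the test module.
if __name__ == "__main__":
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.request("GET", "https://huggingface.co")
        except requests.exceptions.ConnectionError:
            print("offline simulation works: connection refused as expected")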
| 142
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
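# Usage sketch for the config above: instantiate it (with defaults or
# overrides) and hand it to the companion model class. The import below is
# illustrative of how the class is normally consumed.
#
#   from transformers import ViTMSNConfig, ViTMSNModel
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   model = ViTMSNModel(config)      # randomly initialized with this geometry
#   print(config.num_hidden_layers)  # 12 by default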
| 142
| 1
|
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 183
|
"""simple docstring"""
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 202
| 0
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 370
|
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
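# Usage sketch for the converter above, assuming `pt_model` is a PyTorch model
# and `flax_model` is a matching diffusers-style Flax module that exposes
# `init_weights(rng)` (both names are illustrative):
#
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
#   outputs = flax_model.apply({"params": flax_params}, *model_inputs)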
| 195
| 0
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers CLI."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints.")
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on")
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)")
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately.")
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute("/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"]),
                    APIRoute("/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"]),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True)):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
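# Client-side sketch against the routes registered above, assuming the server
# was started via `transformers-cli serve --task feature-extraction --port 8888`
# (the payload keys mirror the Body(..., embed=True) parameters):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8888/tokenize",
#       json={"text_input": "Hello world", "return_ids": True},
#   )
#   print(resp.json())  # {"tokens": [...], "tokens_ids": [...]}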
| 9
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_A : Any = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_A : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
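# A small illustration of what REPLACE_PATTERNS does: the "init" pattern swaps
# the version string inside a source blob in place (illustrative only):
#
#   sample = '__version__ = "0.19.0.dev0"\n'
#   re_pattern, replace = REPLACE_PATTERNS["init"]
#   print(re_pattern.sub(replace.replace("VERSION", "0.19.0"), sample))
#   # __version__ = "0.19.0"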
| 142
| 0
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 357
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
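# Usage sketch for the fast tokenizer above (any checkpoint from the
# pretrained maps works as the name):
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   ids = tokenizer.build_inputs_with_special_tokens(
#       tokenizer.encode("hello", add_special_tokens=False)
#   )
#   print(tokenizer.decode(ids))  # "[CLS] hello [SEP]"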
| 156
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 135
|
"""simple docstring"""
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings and returns its
    length together with one such subsequence.

    >>> longest_common_subsequence("programming", "gaming")
    (6, 'gaming')
    >>> longest_common_subsequence("physics", "smartphone")
    (2, 'ph')
    >>> longest_common_subsequence("computer", "food")
    (1, 'o')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
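# The DP table above costs O(m*n) space. If only the *length* of the LCS is
# needed (no reconstruction of the subsequence), two rolling rows suffice —
# a minimal sketch:
def lcs_length(x: str, y: str) -> int:
    prev = [0] * (len(y) + 1)
    for i in range(1, len(x) + 1):
        curr = [0] * (len(y) + 1)
        for j in range(1, len(y) + 1):
            if x[i - 1] == y[j - 1]:
                curr[j] = prev[j - 1] + 1
            else:
                curr[j] = max(prev[j], curr[j - 1])
        prev = curr
    return prev[len(y)]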
| 135
| 1
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features({"input_texts": datasets.Value("string")}),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"], )
    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 122
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
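# What the lazy module above buys you (illustrative): importing the package is
# cheap, and the heavy torch-backed symbols are only materialized on first
# attribute access.
#
#   import transformers.models.focalnet as focalnet      # fast, no torch import yet
#   model_cls = focalnet.FocalNetForImageClassification  # torch loads here, on demand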
| 122
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 142
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / '''file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''' )
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / '''malformed_file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''' )
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / '''csv_with_image.csv'''
    data = textwrap.dedent(
        f"\\n image\n {image_file}\n " )
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / '''csv_with_label.csv'''
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''' )
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / '''csv_with_int_list.csv'''
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''' )
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match='''Error tokenizing data'''):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding='''utf-8''') as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='''utf-8''', features=Features({'''image''': Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('''image''').type == Image()()
    generated_content = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding='''utf-8''') as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='''utf-8''', features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('''label''').type == ClassLabel(names=['''good''', '''bad'''])()
    generated_content = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad''']).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding='''utf-8''', sep=''',''', converters={'''int_list''': lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field('''int_list''').type)
    generated_content = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
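# The `converters` hook exercised in the last test is plain pandas `read_csv`
# plumbing, so — assuming the csv builder forwards pandas kwargs, which
# CsvConfig does — the same trick works when loading a CSV directly (the file
# path below is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="data.csv",
#                     converters={"int_list": lambda x: [int(i) for i in x.split()]})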
| 142
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
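# Usage sketch: align the template's label schema with a dataset's concrete
# ClassLabel feature, then read off the column mapping (values illustrative).
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["yes", "no"])})
#   task = AudioClassification(audio_column="audio", label_column="labels")
#   aligned = task.align_with_features(features)
#   aligned.label_schema["labels"].names   # ['yes', 'no']
#   task.column_mapping                    # {'audio': 'audio', 'labels': 'labels'}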
| 127
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    # Verify the conversion on a standard COCO validation image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    # Build a mapping from TF block names ("1a", "2b", ...) to HF block indices
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
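# Illustrative note (added): TF stores conv kernels as (height, width, in_channels,
# out_channels) while PyTorch expects (out_channels, in_channels, height, width),
# which is exactly what permute(3, 2, 0, 1) above produces. A tiny self-contained
# check (hypothetical helper, not part of the original script):
def _check_kernel_layout_conversion():
    dummy_tf_kernel = np.zeros((3, 3, 16, 32))  # (H, W, C_in, C_out)
    converted = torch.from_numpy(dummy_tf_kernel).permute(3, 2, 0, 1)
    assert tuple(converted.shape) == (32, 16, 3, 3)  # (C_out, C_in, H, W)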
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
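    # Example invocation (added, illustrative; the script filename is an assumption):
    #   python convert_efficientnet_to_pytorch.py --model_name b0 \
    #       --pytorch_dump_folder_path hf_model --save_model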
| 127
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
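# Hedged usage sketch (added): inspecting the ONNX export config; constructing it from
# a default LevitConfig() is an assumption for illustration.
#
#   onnx_config = LevitOnnxConfig(LevitConfig())
#   print(dict(onnx_config.inputs))           # {'pixel_values': {0: 'batch', ...}}
#   print(onnx_config.atol_for_validation)    # 0.0001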
| 205
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
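# Quick illustration (added): rename_key rewrites indexed PyTorch module paths into
# Flax-style names by replacing each "name.<digit>" with "name_<digit>". The key used
# below is a made-up example.
def _demo_rename_key():
    assert rename_key("down_blocks.0.attentions.1.proj.weight") == "down_blocks_0.attentions_1.proj.weight"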
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
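# Hedged usage sketch (added): any Flax model exposing init_weights(rng) should work
# with the converter above; the variable names here are assumptions for illustration.
#
#   flax_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)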
| 195
| 0
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain data files for WikiText.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--data_file", type=str, default=None,
                        help="A jbl file containing tokenized data which can be split as objective dataset, "
                             "train_dataset and test_dataset.")
    parser.add_argument("--igf_data_file", type=str, default=None,
                        help="A jbl file containing the context and information gain pairs to train secondary learner.")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the final fine-tuned model is stored.")
    parser.add_argument("--tokenizer_name", default=None, type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument("--context_len", default=32, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--size_objective_set", default=100, type=int,
                        help="number of articles that are long enough to be used as our objective set")
    parser.add_argument("--eval_freq", default=100, type=int,
                        help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument("--secondary_learner_batch_size", default=128, type=int,
                        help="batch size of training data for secondary learner")
    parser.add_argument("--batch_size", default=16, type=int,
                        help="batch size of training data of language model(gpt2) ")
    parser.add_argument("--eval_interval", default=10, type=int,
                        help="decay the selectivity of our secondary learner filter from "
                             "1 standard deviation above average to 1 below average after 10 batches")
    parser.add_argument("--number", default=100, type=int,
                        help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int,
                        help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int,
                        help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool,
                        help="truncate the example if it exceeds context length")
    parser.add_argument("--threshold", default=1.0, type=float,
                        help="The threshold value used by secondary learner to filter the train_data and allow only "
                             "informative data as input to the model")
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument("--recopy_model", default=recopy_gpt2, type=str,
                        help="Reset the model to the original pretrained GPT-2 weights after each iteration")

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
    )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
        eval_freq=100, igf_model_path="igf_model.pt",
    )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
        recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
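# Rough pipeline recap (added): generate (context, IG(X)) pairs, train the secondary
# learner on them, then fine-tune GPT-2 while the learner filters out low-information
# contexts. Example invocation (illustrative; the script filename is an assumption):
#   python run_clm_igf.py --data_dir data --model_name_or_path gpt2 --output_dir out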
| 167
|
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
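# Illustrative behavior (added). Note this implementation caps octets at 254, so the
# broadcast address 255.255.255.255 is reported as invalid:
#   is_ip_v4_address_valid("192.168.0.23")  -> True
#   is_ip_v4_address_valid("192.256.15.8")  -> False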
| 167
| 1
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def a__ ( self ) -> List[Any]:
_A : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Union[str, Any] = None
if self.use_input_mask:
_A : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_A : Tuple = None
if self.use_token_type_ids:
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Dict = None
_A : List[Any] = None
_A : Dict = None
if self.use_labels:
_A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_A : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> List[Any]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_A : Optional[int] = LlamaModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a , attention_mask=_a )
_A : str = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]:
_A : Union[str, Any] = True
_A : Any = LlamaModel(_a )
model.to(_a )
model.eval()
_A : int = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_A : Optional[int] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
_A : Optional[int] = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
_A : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Any:
_A : Optional[Any] = True
_A : Tuple = True
_A : str = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
_A : Union[str, Any] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
_A : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_A : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
_A : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
_A : Dict = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
_A : Optional[int] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
_A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def a__ ( self ) -> Union[str, Any]:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A : Optional[Any] = type
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> List[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[str] = 3
_A : Optional[Any] = input_dict["""input_ids"""]
_A : Tuple = input_ids.ne(1 ).to(_a )
_A : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : Tuple = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : int = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> Tuple:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : int = 3
_A : Union[str, Any] = """single_label_classification"""
_A : Union[str, Any] = input_dict["""input_ids"""]
_A : int = input_ids.ne(1 ).to(_a )
_A : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : Optional[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> str:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[str] = 3
_A : Tuple = """multi_label_classification"""
_A : List[str] = input_dict["""input_ids"""]
_A : Union[str, Any] = input_ids.ne(1 ).to(_a )
_A : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_A : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Tuple = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def a__ ( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def a__ ( self , _a ) -> Any:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_A : Dict = ids_tensor([1, 10] , config.vocab_size )
_A : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_A : str = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
_A : Dict = original_model(_a ).last_hidden_state
_A : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_A : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
_A : Optional[Any] = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
_A : Tuple = scaled_model(_a ).last_hidden_state
_A : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
@require_torch
class lowercase ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Dict:
_A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : Tuple = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
_A : int = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_A : Optional[int] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : Any = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Any:
_A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
_A : Dict = model(torch.tensor(_a ) )
# Expected mean on dim = -1
_A : Dict = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Optional[Any]:
_A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : Tuple = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
_A : Optional[int] = model(torch.tensor(_a ) )
# Expected mean on dim = -1
_A : Tuple = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
@slow
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
_A : int = model(torch.tensor(_a ) )
_A : Tuple = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# fmt: off
_A : Any = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def a__ ( self ) -> Any:
_A : Any = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
_A : List[Any] = """Simply put, the theory of relativity states that """
_A : Dict = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
_A : List[str] = tokenizer.encode(_a , return_tensors="""pt""" )
_A : Optional[Any] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=_a )
# greedy generation outputs
_A : Optional[Any] = model.generate(_a , max_new_tokens=64 , top_p=_a , temperature=1 , do_sample=_a )
_A : Union[str, Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
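# Hedged sketch (added): the parameterized rope-scaling test in this file exercises
# configs of the following shape; the values and usage here are illustrative only.
#
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
#   model = LlamaModel(config)  # positions beyond the original window get rescaled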
| 26
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    """Constructs a SAM processor which wraps a SAM image processor."""

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor
def snake_case_ ( self : List[str] , _snake_case : int , _snake_case : Optional[int] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=None , _snake_case : str="pt" , ):
if input_points is not None:
if len(_snake_case ) != len(_snake_case ):
__lowercase : Optional[Any] = [
self._normalize_coordinates(self.target_size , _snake_case , original_sizes[0] ) for point in input_points
]
else:
__lowercase : List[Any] = [
self._normalize_coordinates(self.target_size , _snake_case , _snake_case )
for point, original_size in zip(_snake_case , _snake_case )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowercase , __lowercase : Tuple = self._pad_points_and_labels(_snake_case , _snake_case )
__lowercase : Dict = np.array(_snake_case )
if input_labels is not None:
__lowercase : Dict = np.array(_snake_case )
if input_boxes is not None:
if len(_snake_case ) != len(_snake_case ):
__lowercase : Union[str, Any] = [
self._normalize_coordinates(self.target_size , _snake_case , original_sizes[0] , is_bounding_box=_snake_case )
for box in input_boxes
]
else:
__lowercase : Tuple = [
self._normalize_coordinates(self.target_size , _snake_case , _snake_case , is_bounding_box=_snake_case )
for box, original_size in zip(_snake_case , _snake_case )
]
__lowercase : Dict = np.array(_snake_case )
if input_boxes is not None:
if return_tensors == "pt":
__lowercase : int = torch.from_numpy(_snake_case )
# boxes batch size of 1 by default
__lowercase : List[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowercase : Dict = tf.convert_to_tensor(_snake_case )
# boxes batch size of 1 by default
__lowercase : int = tf.expand_dims(_snake_case , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowercase : Tuple = torch.from_numpy(_snake_case )
# point batch size of 1 by default
__lowercase : Tuple = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowercase : List[Any] = tf.convert_to_tensor(_snake_case )
# point batch size of 1 by default
__lowercase : Optional[int] = tf.expand_dims(_snake_case , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowercase : int = torch.from_numpy(_snake_case )
# point batch size of 1 by default
__lowercase : Any = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowercase : Any = tf.convert_to_tensor(_snake_case )
# point batch size of 1 by default
__lowercase : Union[str, Any] = tf.expand_dims(_snake_case , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def snake_case_ ( self : int , _snake_case : Any , _snake_case : str ):
__lowercase : Union[str, Any] = max([point.shape[0] for point in input_points] )
__lowercase : List[Any] = []
for i, point in enumerate(_snake_case ):
if point.shape[0] != expected_nb_points:
__lowercase : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__lowercase : Tuple = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_snake_case )
__lowercase : List[Any] = processed_input_points
return input_points, input_labels
def snake_case_ ( self : Dict , _snake_case : int , _snake_case : np.ndarray , _snake_case : Any , _snake_case : Any=False ):
__lowercase , __lowercase : Tuple = original_size
__lowercase , __lowercase : Optional[Any] = self.image_processor._get_preprocess_shape(_snake_case , longest_edge=_snake_case )
__lowercase : Optional[int] = deepcopy(_snake_case ).astype(_snake_case )
if is_bounding_box:
__lowercase : str = coords.reshape(-1 , 2 , 2 )
__lowercase : Dict = coords[..., 0] * (new_w / old_w)
__lowercase : int = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowercase : Optional[Any] = coords.reshape(-1 , 4 )
return coords
def snake_case_ ( self : List[str] , _snake_case : List[Any]=None , _snake_case : Any=None , _snake_case : int=None , ):
if input_points is not None:
if hasattr(_snake_case , '''numpy''' ): # Checks for TF or Torch tensor
__lowercase : Tuple = input_points.numpy().tolist()
if not isinstance(_snake_case , _snake_case ) or not isinstance(input_points[0] , _snake_case ):
raise ValueError('''Input points must be a list of list of floating points.''' )
__lowercase : str = [np.array(_snake_case ) for input_point in input_points]
else:
__lowercase : str = None
if input_labels is not None:
if hasattr(_snake_case , '''numpy''' ):
__lowercase : Any = input_labels.numpy().tolist()
if not isinstance(_snake_case , _snake_case ) or not isinstance(input_labels[0] , _snake_case ):
raise ValueError('''Input labels must be a list of list integers.''' )
__lowercase : List[Any] = [np.array(_snake_case ) for label in input_labels]
else:
__lowercase : Tuple = None
if input_boxes is not None:
if hasattr(_snake_case , '''numpy''' ):
__lowercase : str = input_boxes.numpy().tolist()
if (
not isinstance(_snake_case , _snake_case )
or not isinstance(input_boxes[0] , _snake_case )
or not isinstance(input_boxes[0][0] , _snake_case )
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
__lowercase : List[Any] = [np.array(_snake_case ).astype(np.floataa ) for box in input_boxes]
else:
__lowercase : Dict = None
return input_points, input_labels, input_boxes
@property
def snake_case_ ( self : List[Any] ):
__lowercase : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(_snake_case ) )
def snake_case_ ( self : str , *_snake_case : Union[str, Any] , **_snake_case : Dict ):
return self.image_processor.post_process_masks(*_snake_case , **_snake_case )
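# Hedged usage sketch (added): prompting SAM through this processor with one point.
# The checkpoint name, image variable, and coordinates are assumptions for illustration.
#
#   from transformers import SamProcessor
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="pt")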
| 156
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
def snake_case ( self ):
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TFResNetModel(config=__a )
__lowerCAmelCase = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFResNetForImageClassification(__a )
__lowerCAmelCase = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__UpperCAmelCase : Union[str, Any] =(
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__UpperCAmelCase : Optional[int] =False
__UpperCAmelCase : Tuple =False
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Any =False
__UpperCAmelCase : int =False
def snake_case ( self ):
__lowerCAmelCase = TFResNetModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__a , has_text_modality=__a )
def snake_case ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def snake_case ( self ):
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def snake_case ( self ):
pass
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
__lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def snake_case ( self ):
def check_hidden_states_output(__a , __a , __a ):
__lowerCAmelCase = model_class(__a )
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowerCAmelCase = layer_type
__lowerCAmelCase = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(__a , __a , __a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def snake_case ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFResNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
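# Hedged usage sketch (added): the integration test above mirrors this user-facing
# inference flow; the checkpoint name is an assumption for illustration.
#
#   image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   logits = model(**image_processor(images=prepare_img(), return_tensors="tf")).logits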
| 259
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
def snake_case ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
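
# NOTE (illustrative, not part of the test suite): a minimal forecasting sketch
# reusing the same checkpoint and dataset batch exercised above; it assumes
# network access to the Hugging Face Hub.
#
#     import torch
#     from huggingface_hub import hf_hub_download
#     from transformers import AutoformerForPrediction
#
#     file = hf_hub_download(
#         repo_id="hf-internal-testing/tourism-monthly-batch", filename="val-batch.pt", repo_type="dataset"
#     )
#     batch = torch.load(file, map_location="cpu")
#     model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#     outputs = model.generate(
#         past_values=batch["past_values"],
#         past_time_features=batch["past_time_features"],
#         past_observed_mask=batch["past_observed_mask"],
#         static_categorical_features=batch["static_categorical_features"],
#         future_time_features=batch["future_time_features"],
#     )
#     point_forecast = outputs.sequences.mean(dim=1)  # average over parallel samples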
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    # The obfuscated source dropped the left-hand dict keys for the split q/k/v
    # projections; they are reconstructed here following the standard ViT/ViTMAE
    # conversion layout used elsewhere in this repository.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
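
# NOTE (illustrative): example invocation of this conversion script, assuming
# it is saved as convert_vit_mae_to_pytorch.py:
#
#     python convert_vit_mae_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#         --pytorch_dump_folder_path ./vit-mae-base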
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length,)
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
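
# NOTE (illustrative): quick sanity check. The Project Euler 145 statement
# gives 120 reversible numbers below one thousand, so:
#
#     >>> solution(3)
#     120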
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps,
        )

    def scale_model_input(self, state, sample, timestep=None):
        return sample

    def set_timesteps(self, state, num_inference_steps, shape=()):
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps,
        )
    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, state, model_output, timestep, sample, key=None, return_dict=True):
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler.")

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state, sample, noise, timesteps):
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
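
# NOTE (illustrative): a minimal denoising-loop sketch for this scheduler. The
# "model output" below is a zero stand-in for a real diffusion UNet call, and
# the sample shape is arbitrary.
#
#     import jax
#     import jax.numpy as jnp
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#
#     key = jax.random.PRNGKey(0)
#     sample = jax.random.normal(key, (1, 64, 64, 3)) * state.init_noise_sigma
#     for t in state.timesteps:
#         model_output = jnp.zeros_like(sample)  # stand-in for model(sample, t)
#         key, step_key = jax.random.split(key)
#         sample = scheduler.step(state, model_output, t, sample, key=step_key).prev_sample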
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                    written += file_obj.write(json_str)

        return written
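
# NOTE (illustrative): a minimal sketch of the reader/writer pair above,
# assuming a local `data.json` in JSON Lines format. In practice these classes
# back the public `Dataset.from_json(...)` and `Dataset.to_json(...)` entry
# points rather than being used directly.
#
#     ds = JsonDatasetReader("data.json", split="train").read()
#     JsonDatasetWriter(ds, "out.jsonl", num_proc=1).write()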
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union between a prediction and a ground-truth map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union over a list of prediction/ground-truth pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate mean Intersection-over-Union and per-category/overall accuracy."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }), reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels,
        )
        return iou_result
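
# NOTE (illustrative): a minimal end-to-end check, mirroring the example
# embedded in _KWARGS_DESCRIPTION above.
#
#     import numpy as np
#     import datasets
#
#     metric = datasets.load_metric("mean_iou")
#     predicted = [np.array([[1, 2], [3, 4], [5, 255]])]
#     ground_truth = [np.array([[0, 3], [5, 4], [6, 255]])]
#     results = metric.compute(
#         predictions=predicted, references=ground_truth,
#         num_labels=10, ignore_index=255, reduce_labels=False,
#     )
#     print(results["mean_iou"], results["overall_accuracy"])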
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
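
# NOTE (illustrative): a minimal greedy-generation sketch with the same
# checkpoint the tests above use (assumes TensorFlow and network access).
#
#     from transformers import TFXGLMForCausalLM, XGLMTokenizer
#
#     tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
#     inputs = tokenizer("Today is a nice day and", return_tensors="tf")
#     output_ids = model.generate(inputs.input_ids, do_sample=False, max_new_tokens=12)
#     print(tokenizer.decode(output_ids[0], skip_special_tokens=True))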
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3, )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2, )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2, )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
pass
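# A minimal usage sketch of the pipeline exercised by the tests above; not part of the original
# test suite. The model id matches the one under test, but the image path is a placeholder.
def _example_dqa_usage(image_path: str = "invoice.png") -> list:
    from transformers import pipeline

    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    # Returns a list of {"score", "answer", "start", "end"} dicts, best answers first.
    return dqa(image=image_path, question="What is the invoice number?", top_k=2)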
| 222
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
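# A sketch of the reproducibility pattern the tests above rely on; not part of the original
# file. A fixed torch seed plus a deterministic scheduler makes the 3x3 corner slice of the
# generated image stable enough to compare against hard-coded reference values.
def _example_image_slice_check(images: np.ndarray, expected_slice: np.ndarray, tol: float = 1e-2) -> bool:
    # Compare the bottom-right 3x3 patch of the last channel, as the assertions above do.
    image_slice = images[0, -3:, -3:, -1]
    return float(np.abs(image_slice.flatten() - expected_slice).max()) < tol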
| 222
| 1
|
"""simple docstring"""
import numpy as np
def sigmoid(vector):
    """Apply the logistic sigmoid 1 / (1 + e^(-x)) elementwise to `vector`."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
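# Illustrative example, not part of the original file: `sigmoid` is vectorized through numpy,
# so it maps elementwise over arrays; sigmoid(0) is exactly 0.5 and every output lies in (0, 1).
def _example_sigmoid_usage() -> None:
    values = sigmoid(np.array([-1.0, 0.0, 1.0]))
    assert values[1] == 0.5
    assert ((0 < values) & (values < 1)).all()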
| 167
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 167
| 1
|
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence )


def benchmark() -> None:
    """Time both implementations against each other."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"{example} encrypted in atbash: {atbash(example)}")
benchmark()
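# Illustrative example, not part of the original file: Atbash is an involution (its own
# inverse), so applying either implementation twice returns the input unchanged, and both
# implementations agree on every input.
def _example_atbash_roundtrip(text: str = "Hello, World 123!") -> None:
    assert atbash(atbash(text)) == text
    assert atbash_slow(atbash_slow(text)) == text
    assert atbash(text) == atbash_slow(text)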
| 68
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).')
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
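# Illustrative example, not part of the original file: how `encode_example` normalizes a raw
# translation dict, splitting a multi-valued language into parallel, language-sorted tuples.
def _example_encode_translation() -> None:
    feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
    encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
    assert encoded == {
        "language": ("en", "fr", "fr"),
        "translation": ("the cat", "la chatte", "le chat"),
    }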
| 68
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__( self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
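# Illustrative example, not part of the original file: the ONNX config marks which input axes
# are dynamic at export time; for the default task, batch and sequence may vary per call.
def _example_onnx_inputs() -> None:
    onnx_config = DistilBertOnnxConfig(DistilBertConfig())
    assert list(onnx_config.inputs.keys()) == ["input_ids", "attention_mask"]
    assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "sequence"}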
| 259
|
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = [1, 3, 0, 5, 8, 5]
__snake_case = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
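# Illustrative variant, not part of the original file: the same greedy rule, returning the
# selected indices instead of printing them, which is easier to test. Like the function
# above, it assumes the activities are already sorted by finish time.
def select_max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:  # compatible with the last selected activity
            selected.append(j)
            i = j
    return selected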
| 259
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
lowercase_ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
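# Illustrative sketch, not part of the original script: the flatten/unflatten bookkeeping used
# by the collator above, in isolation. Choices are padded as one flat batch, then viewed back
# to (batch_size, num_choices, seq_len) so the model can score all choices jointly.
def _example_unflatten(batch_size: int = 2, num_choices: int = 4, seq_len: int = 8) -> None:
    flat = torch.zeros(batch_size * num_choices, seq_len)
    unflat = flat.view(batch_size, num_choices, -1)
    assert unflat.shape == (batch_size, num_choices, seq_len)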
def main():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
            max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
# Metric
    def compute_metrics(eval_predictions):
        preds, label_ids = eval_predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
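# Illustrative invocation, not part of the original script: a typical command line, using the
# argument names defined by the dataclasses above plus standard `TrainingArguments` flags.
# When no data files are given, the SWAG dataset is downloaded from the Hub automatically.
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --output_dir /tmp/swag_out \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 --num_train_epochs 3 \
#     --overwrite_output_dir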
| 368
|
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort input_list[low : high + 1] by merging its two sorted halves."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort of the given list."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
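# Illustrative check, not part of the original file: a quick correctness test against Python's
# built-in sort. Bottom-up merge sort runs in O(n log n) and handles non-power-of-two lengths
# via the final merge of the last two runs.
def _example_iter_merge_sort() -> None:
    data = [5, 9, 8, 7, 1, 2, 7]
    assert iter_merge_sort(data) == sorted(data)
    assert iter_merge_sort([]) == []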
| 20
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
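# A note on the pattern above, not part of the original file: `_LazyModule` defers the actual
# submodule import until an attribute is first touched, keeping package import cheap. A sketch
# of the observable behavior, assuming the standard transformers package layout:
#
#     from transformers.models import bertweet   # fast: nothing heavy is imported yet
#     tok_cls = bertweet.BertweetTokenizer        # first attribute access triggers the import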
| 61
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
'''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
'''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values, labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
    def test_attention_outputs(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small(self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
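# Illustrative sketch, not part of the original test file: the end-to-end segmentation flow
# the integration tests above verify, condensed into one helper. The checkpoint name matches
# the tests; `image` is expected to be a PIL image.
def _example_segmentation(image):
    processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
    model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # one (height, width) map of class ids per image, resized to the requested target size
    return processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]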
| 198
| 0
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
'''simple docstring'''
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu") -> None:
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward(self, hidden_state: Tensor) -> Tensor:
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
'''simple docstring'''
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels
    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
'''simple docstring'''
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
'''simple docstring'''
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu") -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride), ResNetConvLayer(out_channels, out_channels, activation=None), )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
'''simple docstring'''
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1), ResNetConvLayer(reduces_channels, reduces_channels, stride=stride), ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
'''simple docstring'''
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ) -> None:
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act), *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)], )
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
'''simple docstring'''
    def __init__(self, config: ResNetConfig) -> None:
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state, hidden_states=hidden_states, )
class ResNetPreTrainedModel(PreTrainedModel):
'''simple docstring'''
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
__A = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." , __SCREAMING_SNAKE_CASE , )
class ResNetModel(ResNetPreTrainedModel):
'''simple docstring'''
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
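# Illustrative sketch, not part of the original module: minimal feature extraction with the
# model defined above, mirroring the doc-sample metadata (`microsoft/resnet-50`, expected
# feature-map shape [1, 2048, 7, 7] for a 224x224 input).
def _example_resnet_features() -> None:
    model = ResNetModel.from_pretrained("microsoft/resnet-50")
    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        outputs = model(pixel_values)
    # global average pooling collapses the 7x7 feature map to 1x1
    assert list(outputs.pooler_output.shape) == [1, 2048, 1, 1]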
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
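
# Usage sketch (not part of the original module). It assumes the standard
# `transformers` entry point `AutoImageProcessor` and the public
# "microsoft/resnet-50" checkpoint; any ResNet checkpoint works the same way.
def _resnet_usage_example():
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor

    image = Image.new("RGB", (224, 224))  # stand-in for a real photo
    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]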
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    """Prints the shortest distance from `src` to every vertex."""
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Returns True if relaxing any edge still improves a distance (negative cycle)."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Returns the shortest distances from `src` to all vertices, or raises on a negative cycle."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
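
# Non-interactive sketch (hypothetical sample data) showing the expected edge
# format and a direct call:
#
#     example_graph = [
#         {"src": 0, "dst": 1, "weight": 2},
#         {"src": 1, "dst": 2, "weight": 3},
#         {"src": 0, "dst": 2, "weight": 10},
#     ]
#     bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0)
#     # -> [0.0, 2.0, 5.0]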
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25_922, 256_299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333, 61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
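
# Sketch (not an actual test; assumes only this file's imports) of driving the
# tester helpers above directly, outside the unittest machinery:
#
#     tester = UMT5ModelTester(parent=unittest.TestCase())
#     config, inputs = tester.prepare_config_and_inputs()
#     model = UMT5Model(config).eval()
#     out = model(**inputs)
#     out.last_hidden_state.shape  # (batch_size, decoder_seq_length, hidden_size)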
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
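
# Behavior sketch for the lazy structure above (hypothetical session, assuming
# torch is installed): attribute access on the module triggers the real import.
#
#     from transformers import InstructBlipProcessor               # resolved lazily
#     from transformers import InstructBlipForConditionalGeneration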
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Returns the URL of a dataset file on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
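
# Usage sketch (hypothetical repo and file names): builds a resolvable URL of
# the form https://huggingface.co/datasets/<repo_id>/resolve/<revision>/<path>.
#
#     url = hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="main")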
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
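
# The class above is a thin wrapper around nltk; the same score can be computed
# directly (hypothetical token lists shown):
#
#     from nltk.translate import gleu_score
#     score = gleu_score.corpus_gleu(
#         list_of_references=[[["the", "cat", "sat"]]],
#         hypotheses=[["the", "cat", "sat"]],
#     )
#     # score == 1.0 for an exact match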
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Always raises the CUDA OOM error that `find_executable_batch_size` retries on."""
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicitly(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicitly(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
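
# Production-style sketch of the decorator under test (the training helpers are
# placeholders, not real APIs): the inner function is retried with a halved
# batch size each time it raises a CUDA out-of-memory error.
#
#     @find_executable_batch_size(starting_batch_size=256)
#     def train(batch_size):
#         loader = make_dataloader(batch_size)  # hypothetical helper
#         for batch in loader:
#             training_step(batch)              # hypothetical helper
#
#     train()  # `batch_size` is injected by the decorator; do not pass it yourself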
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for a GPTNeoXJapanese model."""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
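
# Usage sketch: the defaults above describe the abeja/gpt-neox-japanese-2.7b
# architecture, so (network access permitting) these two lines are equivalent:
#
#     config = GPTNeoXJapaneseConfig()
#     config = GPTNeoXJapaneseConfig.from_pretrained("abeja/gpt-neox-japanese-2.7b")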
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate, no warmup."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise constant multipliers parsed from a `step_rules` string."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
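
# Example of the `step_rules` string consumed above (illustrative values):
# "1:10,2:1,3:0.1,0.01" yields a multiplier of 10 for steps < 1, 1 for steps < 2,
# 0.1 for steps < 3, and 0.01 for every later step.
#
#     scheduler = get_piecewise_constant_schedule(optimizer, step_rules="1:10,2:1,3:0.1,0.01")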
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Cosine schedule with `num_cycles` hard restarts after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Polynomial decay from the optimizer's initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
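
# End-to-end usage sketch (toy parameters; names are illustrative only):
#
#     import torch
#
#     params = [torch.nn.Parameter(torch.zeros(1))]
#     optimizer = torch.optim.AdamW(params, lr=1e-4)
#     lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
#     for _ in range(1000):
#         optimizer.step()
#         lr_scheduler.step()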
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the dataset as JSON to the given binary file handle, batch by batch."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
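
# Usage sketch (hypothetical file names), going through the public `datasets`
# API rather than instantiating these classes directly:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("json", data_files="train.jsonl", split="train")
#     ds.to_json("export.jsonl", num_proc=2)  # JsonDatasetWriter runs under the hood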
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64), class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Any ,lowercase_ : Any ,lowercase_ : Union[str, Any]="cpu" ,lowercase_ : Any=torch.floataa ,lowercase_ : Tuple=0 ):
lowerCAmelCase__ : str = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase__ : str = np.random.RandomState(lowercase_ ).standard_normal((1, 8, 1_2_8, 1_6) )
lowerCAmelCase__ : str = torch.from_numpy(lowercase_ ).to(device=lowercase_ ,dtype=lowercase_ )
lowerCAmelCase__ : List[Any] = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Union[str, Any] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowerCAmelCase__ : List[Any] = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : Union[str, Any] = self.get_inputs(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = 2_5
lowerCAmelCase__ : Optional[Any] = audioldm_pipe(**lowercase_ ).audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 8_1_9_2_0
lowerCAmelCase__ : Optional[Any] = audio[7_7_2_3_0:7_7_2_4_0]
lowerCAmelCase__ : Optional[Any] = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
lowerCAmelCase__ : List[str] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Optional[int] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowerCAmelCase__ : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowerCAmelCase__ : str = audioldm_pipe.to(lowercase_ )
audioldm_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : Optional[Any] = self.get_inputs(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = audioldm_pipe(**lowercase_ ).audios[0]
assert audio.ndim == 1
assert len(lowercase_ ) == 8_1_9_2_0
lowerCAmelCase__ : List[Any] = audio[2_7_7_8_0:2_7_7_9_0]
lowerCAmelCase__ : Any = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
lowerCAmelCase__ : Tuple = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
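# A minimal sketch of the inference path the tests above exercise, assuming the
# public `cvssp/audioldm` checkpoint and the standard `diffusers` AudioLDM API;
# guarded so it only runs when this file is executed directly.
if __name__ == "__main__":
    from scipy.io import wavfile

    from diffusers import AudioLDMPipeline

    demo_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
    demo_audio = demo_pipe(
        'A hammer hitting a wooden surface', num_inference_steps=25, audio_length_in_s=5.12
    ).audios[0]
    # the AudioLDM vocoder produces 16 kHz mono waveforms
    wavfile.write('audioldm_demo.wav', rate=16_000, data=demo_audio)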
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
if "cls_token" in name:
lowercase : List[Any] = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
lowercase : Any = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
lowercase : str = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
lowercase : List[str] = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowercase : Tuple = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase : int = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
lowercase : Tuple = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
lowercase : List[Any] = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
lowercase : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase : Union[str, Any] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase : Optional[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase : Dict = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
lowercase : List[str] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
lowercase : Dict = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
lowercase : List[str] = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
lowercase : Tuple = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
lowercase : int = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
for key in orig_state_dict.copy().keys():
lowercase : List[Any] = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
lowercase : int = key.split(""".""" )
lowercase : List[str] = int(key_split[1] )
if "decoder_blocks" in key:
lowercase : Tuple = config.decoder_hidden_size
lowercase : int = """decoder.decoder_layers."""
if "weight" in key:
lowercase : List[Any] = val[:dim, :]
lowercase : Tuple = val[dim : dim * 2, :]
lowercase : List[Any] = val[-dim:, :]
elif "bias" in key:
lowercase : str = val[:dim]
lowercase : Dict = val[dim : dim * 2]
lowercase : Union[str, Any] = val[-dim:]
else:
lowercase : Tuple = config.hidden_size
lowercase : Union[str, Any] = """vit.encoder.layer."""
if "weight" in key:
lowercase : Tuple = val[:dim, :]
lowercase : List[str] = val[dim : dim * 2, :]
lowercase : Dict = val[-dim:, :]
elif "bias" in key:
lowercase : Any = val[:dim]
lowercase : str = val[dim : dim * 2]
lowercase : Union[str, Any] = val[-dim:]
else:
lowercase : Union[str, Any] = val
return orig_state_dict
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : int = ViTMAEConfig()
if "large" in checkpoint_url:
lowercase : Dict = 1_024
lowercase : str = 4_096
lowercase : Optional[Any] = 24
lowercase : Optional[Any] = 16
elif "huge" in checkpoint_url:
lowercase : int = 14
lowercase : List[Any] = 1_280
lowercase : int = 5_120
lowercase : List[Any] = 32
lowercase : Any = 16
lowercase : List[str] = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""]
lowercase : Optional[int] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
lowercase : Union[str, Any] = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
lowercase : Union[str, Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
lowercase : Optional[Any] = ViTMAEImageProcessor(size=config.image_size )
lowercase : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowercase : int = model(**SCREAMING_SNAKE_CASE__ )
lowercase : str = outputs.logits
if "large" in checkpoint_url:
lowercase : List[Any] = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
lowercase : Tuple = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
lowercase : List[str] = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase : List[Any] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
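# Example invocation (a sketch; the script file name is hypothetical, and the
# default --checkpoint_url above already points at the public ViT-MAE base
# visualisation checkpoint):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base-converted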
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : int =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
UpperCAmelCase : List[str] =Vector()
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : List[str] =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(snake_case__ ) , '''(0,0,0,0,0,1)''' )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =Vector([1, 2, 3, 4] )
self.assertEqual(len(snake_case__ ) , 4 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : int =Vector([1, 2] )
UpperCAmelCase : Union[str, Any] =Vector([1, 2, 3, 4, 5] )
UpperCAmelCase : List[str] =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
UpperCAmelCase : Optional[int] =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Dict =Vector([1, 2, 3] )
UpperCAmelCase : List[Any] =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : int =Vector([1, 2, 3] )
UpperCAmelCase : int =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Dict =Vector([1, 2, 3] )
UpperCAmelCase : Dict =Vector([2, -1, 4] ) # for test of dot product
UpperCAmelCase : Union[str, Any] =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : int =Vector([1, 2, 3] )
UpperCAmelCase : Any =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , snake_case__ , snake_case__ ) ) , '''(3,4,7)''' )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : List[Any] =Vector([1, 0, 0, 0, 0, 0] )
UpperCAmelCase : Union[str, Any] =x.copy()
self.assertEqual(str(snake_case__ ) , str(snake_case__ ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Any =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(snake_case__ ) , '''(0,1,0)''' )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : List[Any] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(snake_case__ ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase : str =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(snake_case__ , snake_case__ ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : List[str] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase : Union[str, Any] =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(snake_case__ , snake_case__ ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : List[Any] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : List[str] =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
UpperCAmelCase : Any =Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Optional[int] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(snake_case__ ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Optional[int] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase : Optional[Any] =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Optional[int] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase : int =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
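# A standalone cross-check of the determinant/cofactor relationship the tests
# above verify (a sketch with a hypothetical helper, avoiding the local `.lib`
# dependency): expanding A = [[1, 2, 3], [2, 4, 5], [6, 7, 8]] along its first
# row with the cofactors from the test gives 1*(-3) + 2*14 + 3*(-10) == -5.
def _det3(m):
    """Cofactor expansion of a 3x3 matrix along its first row."""
    return (
        m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1])
        - m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0])
        + m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0])
    )

assert _det3([[1, 2, 3], [2, 4, 5], [6, 7, 8]]) == -5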
if __name__ == "__main__":
unittest.main()
import colorsys
from PIL import Image # type: ignore
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
'''simple docstring'''
UpperCAmelCase : Any =x
UpperCAmelCase : List[str] =y
for step in range(__lowerCAmelCase ): # noqa: B007
UpperCAmelCase : int =a * a - b * b + x
UpperCAmelCase : Union[str, Any] =2 * a * b + y
UpperCAmelCase : Optional[int] =a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
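# quick sanity checks for the escape-time function above (a sketch; the
# function is invoked as `get_distance` inside `get_image` below): the origin
# never escapes, so the full iteration budget is used and the normalised
# distance is 1.0, while a point far outside the set escapes on the first
# step and maps to 0.0
#     assert get_distance(0.0, 0.0, 50) == 1.0
#     assert get_distance(3.0, 3.0, 50) == 0.0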
def lowerCAmelCase_ ( __lowerCAmelCase )-> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def lowerCAmelCase_ ( __lowerCAmelCase )-> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__lowerCAmelCase , 1 , 1 ) )
def lowerCAmelCase_ ( __lowerCAmelCase = 8_00 , __lowerCAmelCase = 6_00 , __lowerCAmelCase = -0.6 , __lowerCAmelCase = 0 , __lowerCAmelCase = 3.2 , __lowerCAmelCase = 50 , __lowerCAmelCase = True , )-> Image.Image:
'''simple docstring'''
UpperCAmelCase : Dict =Image.new('''RGB''' , (image_width, image_height) )
UpperCAmelCase : str =img.load()
# loop through the image-coordinates
for image_x in range(__lowerCAmelCase ):
for image_y in range(__lowerCAmelCase ):
# determine the figure-coordinates based on the image-coordinates
UpperCAmelCase : Union[str, Any] =figure_width / image_width * image_height
UpperCAmelCase : Dict =figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCAmelCase : str =figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCAmelCase : int =get_distance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
UpperCAmelCase : Any =get_color_coded_rgb(__lowerCAmelCase )
else:
UpperCAmelCase : Optional[int] =get_black_and_white_rgb(__lowerCAmelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__snake_case = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class A_ ( a_ , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = TransfoXLTokenizer
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Optional[Any] = False
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
lowercase = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
lowercase = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **_A )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = '<unk> UNwanted , running'
lowercase = '<unk> unwanted, running'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=_A )
lowercase = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(_A , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [0, 4, 8, 7] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = TransfoXLTokenizer(lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = TransfoXLTokenizer(lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = TransfoXLTokenizer(lower_case=_A )
lowercase = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
lowercase = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(_A ) , _A )
self.assertEqual(tokenizer.convert_tokens_to_string(_A ) , _A )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_tokenizer()
lowercase = len(_A )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(_A ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
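# The `@,@` / `@.@` markers exercised above follow the WikiText detokenisation
# convention: numbers are split for tokenisation and re-joined without spaces
# on the way back, e.g. (a sketch):
#
#     tokenizer = TransfoXLTokenizer(lower_case=False)
#     tokenizer.convert_tokens_to_string(['5', '@,@', '000'])  # -> "5,000"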
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
__A : Optional[int] = logging.getLogger(__name__)
@dataclass
class A_ :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCAmelCase__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class A_ :
UpperCAmelCase__ = field(default=a_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCAmelCase__ = field(
default=a_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase__ = field(
default=a_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _lowercase ( self ):
'''simple docstring'''
if self.train_file is not None:
UpperCAmelCase = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCAmelCase = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A_ :
UpperCAmelCase__ = 42
UpperCAmelCase__ = True
UpperCAmelCase__ = None
UpperCAmelCase__ = None
def __call__( self , _A ):
'''simple docstring'''
UpperCAmelCase = '''label''' if '''label''' in features[0].keys() else '''labels'''
UpperCAmelCase = [feature.pop(_A ) for feature in features]
UpperCAmelCase = len(_A )
UpperCAmelCase = len(features[0]['''input_ids'''] )
UpperCAmelCase = [
[{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features
]
UpperCAmelCase = list(chain(*_A ) )
UpperCAmelCase = self.tokenizer.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
UpperCAmelCase = {k: v.view(_A , _A , -1 ) for k, v in batch.items()}
# Add back labels
UpperCAmelCase = torch.tensor(_A , dtype=torch.intaa )
return batch
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCAmelCase = {}
if data_args.train_file is not None:
UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase = data_args.validation_file
UpperCAmelCase = data_args.train_file.split('''.''' )[-1]
UpperCAmelCase = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCAmelCase = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
UpperCAmelCase = '''sent1'''
UpperCAmelCase = '''sent2'''
if data_args.max_seq_length is None:
UpperCAmelCase = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
UpperCAmelCase = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCamelCase__ ):
UpperCAmelCase = [[context] * 4 for context in examples[context_name]]
UpperCAmelCase = examples[question_header_name]
UpperCAmelCase = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
]
# Flatten out
UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
UpperCAmelCase = list(chain(*UpperCamelCase__ ) )
# Tokenize
UpperCAmelCase = tokenizer(
UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
UpperCAmelCase = raw_datasets['''train''']
if data_args.max_train_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
UpperCAmelCase = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
UpperCAmelCase = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
UpperCAmelCase = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
UpperCAmelCase = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
UpperCAmelCase = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
UpperCAmelCase = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCamelCase__ ):
UpperCAmelCase , UpperCAmelCase = eval_predictions
UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase = train_result.metrics
UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''train''' , UpperCamelCase__ )
trainer.save_metrics('''train''' , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
UpperCAmelCase = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics('''eval''' , UpperCamelCase__ )
trainer.save_metrics('''eval''' , UpperCamelCase__ )
UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
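# The flatten/un-flatten pattern used by `preprocess_function` and the data
# collator above, shown in isolation on toy data (a minimal sketch): each
# example contributes four candidate sequences, which are flattened for
# tokenisation and regrouped afterwards.
#
#     from itertools import chain
#     nested = [["s1 e1", "s1 e2", "s1 e3", "s1 e4"],
#               ["s2 e1", "s2 e2", "s2 e3", "s2 e4"]]
#     flat = list(chain(*nested))                               # 8 sequences
#     regrouped = [flat[i : i + 4] for i in range(0, len(flat), 4)]
#     assert regrouped == nested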
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = "dinat"
__snake_case : Any = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict ,lowerCamelCase__ : Optional[Any]=4 ,lowerCamelCase__ : Any=3 ,lowerCamelCase__ : int=64 ,lowerCamelCase__ : Union[str, Any]=[3, 4, 6, 5] ,lowerCamelCase__ : str=[2, 4, 8, 16] ,lowerCamelCase__ : Tuple=7 ,lowerCamelCase__ : Optional[int]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] ,lowerCamelCase__ : str=3.0 ,lowerCamelCase__ : str=True ,lowerCamelCase__ : Union[str, Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : str=0.1 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : Tuple=0.02 ,lowerCamelCase__ : List[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : str=None ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Optional[Any] ,) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = len(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = kernel_size
SCREAMING_SNAKE_CASE = dilations
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(lowerCamelCase__ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ ,out_indices=lowerCamelCase__ ,stage_names=self.stage_names )
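# Worked example of the bookkeeping above (a sketch using the __init__
# defaults): with embed_dim=64 and depths=[3, 4, 6, 5] there are four stages,
# so the channel dimension after the last stage is
#
#     hidden_size = int(64 * 2 ** (4 - 1))  # == 512
#
# and stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"].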
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self : int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {}
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> None:
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(lowerCamelCase__ ,""" -> """ ,""" -> """.join([str(lowerCamelCase__ ) for j in self.vertex[i]] ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> None:
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(lowerCamelCase__ )
else:
# else make a new vertex
SCREAMING_SNAKE_CASE = [to_vertex]
def SCREAMING_SNAKE_CASE__ ( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : list ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = True
print(lowerCamelCase__ ,end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(lowerCamelCase__ ,lowerCamelCase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
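# For comparison, an iterative version of the same traversal from vertex 0,
# using an explicit stack instead of recursion (a sketch, assuming integer
# vertices as in the demo above):
#
#     def dfs_iterative(adj):
#         visited, order, stack = set(), [], [0]
#         while stack:
#             v = stack.pop()
#             if v in visited:
#                 continue
#             visited.add(v)
#             order.append(v)
#             # push neighbours in reverse so lower-numbered ones pop first
#             stack.extend(reversed(adj.get(v, [])))
#         return order
#
#     dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]})  # -> [0, 1, 2, 3]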
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = KandinskyInpaintPipeline
_SCREAMING_SNAKE_CASE = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_SCREAMING_SNAKE_CASE = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_SCREAMING_SNAKE_CASE = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_SCREAMING_SNAKE_CASE = False
@property
def lowercase_ ( self : Optional[int] ) ->Optional[Any]:
return 3_2
@property
def lowercase_ ( self : Dict ) ->Tuple:
return 3_2
@property
def lowercase_ ( self : List[str] ) ->int:
return self.time_input_dim
@property
def lowercase_ ( self : List[str] ) ->Optional[int]:
return self.time_input_dim * 4
@property
def lowercase_ ( self : str ) ->List[Any]:
return 1_0_0
@property
def lowercase_ ( self : Union[str, Any] ) ->Tuple:
snake_case__ : List[Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowercase_ ( self : List[Any] ) ->str:
torch.manual_seed(0 )
snake_case__ : Tuple = MCLIPConfig(
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=3_7, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_0_0_5, )
snake_case__ : Dict = MultilingualCLIP(_snake_case )
snake_case__ : Any = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : Optional[Any] ) ->Union[str, Any]:
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : Optional[int] = UNetaDConditionModel(**_snake_case )
return model
@property
def lowercase_ ( self : Any ) ->Tuple:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : str ) ->Dict:
torch.manual_seed(0 )
snake_case__ : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : str ) ->List[Any]:
snake_case__ : str = self.dummy_text_encoder
snake_case__ : Optional[Any] = self.dummy_tokenizer
snake_case__ : List[Any] = self.dummy_unet
snake_case__ : Optional[int] = self.dummy_movq
snake_case__ : Optional[Any] = DDIMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='linear', beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, clip_sample=_snake_case, set_alpha_to_one=_snake_case, steps_offset=1, prediction_type='epsilon', thresholding=_snake_case, )
snake_case__ : Union[str, Any] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase_ ( self : str, _snake_case : Any, _snake_case : Tuple=0 ) ->str:
snake_case__ : List[str] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : List[str] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1 ) ).to(_snake_case )
# create init_image
snake_case__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : int = image.cpu().permute(0, 2, 3, 1 )[0]
snake_case__ : Dict = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
snake_case__ : Optional[Any] = np.ones((6_4, 6_4), dtype=np.floataa )
snake_case__ : Optional[Any] = 0
if str(_snake_case ).startswith('mps' ):
snake_case__ : Any = torch.manual_seed(_snake_case )
else:
snake_case__ : int = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
snake_case__ : int = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowercase_ ( self : List[str] ) ->Any:
snake_case__ : Any = 'cpu'
snake_case__ : str = self.get_dummy_components()
snake_case__ : Any = self.pipeline_class(**_snake_case )
snake_case__ : Any = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case__ : Optional[int] = pipe(**self.get_dummy_inputs(_snake_case ) )
snake_case__ : int = output.images
snake_case__ : str = pipe(
**self.get_dummy_inputs(_snake_case ), return_dict=_snake_case, )[0]
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Any = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def lowercase_ ( self : Dict ) ->Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : int ) ->Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Any ) ->Tuple:
snake_case__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
snake_case__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
snake_case__ : Dict = np.ones((7_6_8, 7_6_8), dtype=np.floataa )
snake_case__ : Union[str, Any] = 0
snake_case__ : Optional[Any] = 'a hat'
snake_case__ : Optional[int] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
snake_case__ : List[str] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.floataa )
snake_case__ : Any = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
snake_case__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case__ , snake_case__ : List[Any] = pipe_prior(
_snake_case, generator=_snake_case, num_inference_steps=5, negative_prompt='', ).to_tuple()
snake_case__ : Dict = pipeline(
_snake_case, image=_snake_case, mask_image=_snake_case, image_embeds=_snake_case, negative_image_embeds=_snake_case, generator=_snake_case, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, output_type='np', )
snake_case__ : List[Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_snake_case, _snake_case )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ :Tuple = logging.get_logger(__name__)
a_ :Union[str, Any] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """deberta-v2"""
def __init__( self : Union[str, Any], _snake_case : Dict=1_2_8_1_0_0, _snake_case : Any=1_5_3_6, _snake_case : Tuple=2_4, _snake_case : int=2_4, _snake_case : Optional[int]=6_1_4_4, _snake_case : Optional[int]="gelu", _snake_case : Optional[int]=0.1, _snake_case : List[str]=0.1, _snake_case : str=5_1_2, _snake_case : Optional[int]=0, _snake_case : Optional[int]=0.0_2, _snake_case : Dict=1e-7, _snake_case : int=False, _snake_case : Any=-1, _snake_case : List[str]=0, _snake_case : Tuple=True, _snake_case : Any=None, _snake_case : Union[str, Any]=0, _snake_case : Tuple="gelu", **_snake_case : Union[str, Any], ) ->Optional[int]:
super().__init__(**_snake_case )
snake_case__ : Dict = hidden_size
snake_case__ : Optional[int] = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : List[Any] = intermediate_size
snake_case__ : List[Any] = hidden_act
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : List[str] = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Optional[int] = relative_attention
snake_case__ : Tuple = max_relative_positions
snake_case__ : Union[str, Any] = pad_token_id
snake_case__ : Optional[int] = position_biased_input
# Backwards compatibility
if type(_snake_case ) == str:
snake_case__ : int = [x.strip() for x in pos_att_type.lower().split('|' )]
snake_case__ : List[str] = pos_att_type
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : Optional[int] = layer_norm_eps
snake_case__ : Optional[int] = kwargs.get('pooler_hidden_size', _snake_case )
snake_case__ : int = pooler_dropout
snake_case__ : str = pooler_hidden_act
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
@property
def lowercase_ ( self : Optional[int] ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case__ : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case__ : int = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowercase_ ( self : Dict ) ->int:
return 1_2
def lowercase_ ( self : Tuple, _snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], _snake_case : int = -1, _snake_case : int = -1, _snake_case : int = -1, _snake_case : bool = False, _snake_case : Optional["TensorType"] = None, _snake_case : int = 3, _snake_case : int = 4_0, _snake_case : int = 4_0, _snake_case : "PreTrainedTokenizerBase" = None, ) ->Mapping[str, Any]:
snake_case__ : Union[str, Any] = super().generate_dummy_inputs(preprocessor=_snake_case, framework=_snake_case )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
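# A quick sanity check of the defaults above (a sketch, assuming the class
# above corresponds to transformers' DebertaV2Config):
#
#     config = DebertaV2Config()
#     assert config.hidden_size == 1536
#     assert config.num_hidden_layers == config.num_attention_heads == 24
#     assert config.vocab_size == 128100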
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> str:
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
UpperCamelCase_ = quote(SCREAMING_SNAKE_CASE_ )
return hfh.hf_hub_url(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" , revision=SCREAMING_SNAKE_CASE_ )
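# Example (a sketch, assuming the helper above is `datasets`' hf_hub_url): for
# a dataset repo it resolves to the standard `/resolve/` URL, with the file
# path URL-encoded:
#
#     hf_hub_url("user/my_dataset", "data/train file.csv", revision="main")
#     # -> "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train%20file.csv"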
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( snake_case ):
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , )-> Optional[Any]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=_lowercase , speech_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , feature_extractor=_lowercase , )
def UpperCAmelCase_ ( self , _lowercase = "auto" )-> str:
if slice_size == "auto":
UpperCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def UpperCAmelCase_ ( self )-> Optional[int]:
self.enable_attention_slicing(_lowercase )
@torch.no_grad()
def __call__( self , _lowercase , _lowercase=16_000 , _lowercase = 512 , _lowercase = 512 , _lowercase = 50 , _lowercase = 7.5 , _lowercase = None , _lowercase = 1 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , **_lowercase , )-> str:
UpperCamelCase_ = self.speech_processor.feature_extractor(
_lowercase , return_tensors="pt" , sampling_rate=_lowercase ).input_features.to(self.device )
UpperCamelCase_ = self.speech_model.generate(_lowercase , max_length=480_000 )
UpperCamelCase_ = self.speech_processor.tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , normalize=_lowercase )[
0
]
if isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = 1
elif isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = len(_lowercase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_lowercase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_lowercase )}." )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
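        # Illustrative note (added, not part of the original pipeline): with guidance_scale = 7.5
        # the combined prediction computed in the denoising loop below is
        #   noise_pred = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
        # i.e. the unconditional estimate pushed toward the text-conditional one.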
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    F"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    F" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    F"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
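        # Added note for reference: eta = 0.0 recovers deterministic DDIM sampling, while
        # eta = 1.0 matches the DDPM posterior variance; values in between interpolate.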
UpperCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase_ = {}
if accepts_eta:
UpperCamelCase_ = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 60
| 1
|
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 198
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    '''simple docstring'''
    def create_and_test_config_common_properties(self) -> List[str]:
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''width_multiplier'''))
class MobileViTVaModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0) -> List[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self) -> Optional[int]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self) -> Tuple:
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels) -> Any:
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels) -> Optional[Any]:
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels) -> Tuple:
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values, labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> int:
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self) -> List[Any]:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''')
    def test_inputs_embeds(self) -> str:
        pass
    @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''')
    def test_model_common_attributes(self) -> Optional[Any]:
        pass
    @unittest.skip(reason='''MobileViTV2 does not output attentions''')
    def test_attention_outputs(self) -> Optional[int]:
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''')
    def test_multi_gpu_data_parallel_forward(self) -> Any:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self) -> str:
        pass
    def test_forward_signature(self) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self) -> Union[str, Any]:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
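    # Worked example of the check above (added for clarity): with image_size=64 and
    # output_stride=32, the five hidden states have spatial sizes 32, 16, 8, 4 and 2;
    # the divisor finishes at 64, so output_stride == divisor // 2 == 32.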
    def test_for_image_classification(self) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self) -> List[str]:
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    img = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return img
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self) -> int:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self) -> List[Any]:
        model = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self) -> Optional[int]:
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self) -> Any:
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 198
| 1
|
'''simple docstring'''
import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowerCAmelCase_ = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
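# Example (added note): for the standard MNIST training set, _extract_images returns an
# array of shape (60000, 28, 28, 1) with dtype uint8.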
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
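# Example (added note): _dense_to_one_hot(numpy.array([1, 3]), 4) returns
# [[0, 1, 0, 0],
#  [0, 0, 0, 1]]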
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.', )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seeda, seedb = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images
    @property
    def labels(self):
        return self._labels
    @property
    def num_examples(self):
        return self._num_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples)
            numpy.random.shuffle(perma)
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, 'Please write your own downloading logic.')
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
@deprecated(
    None, 'Please use alternatives such as:' " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            'Validation size should be between 0 and '
            f'{len(train_images)}. Received: {validation_size}.'
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 332
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path) -> Any:
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
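# Example invocation (added; the paths are hypothetical placeholders):
#   python convert_bert_tf_checkpoint.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin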
| 332
| 1
|
"""simple docstring"""
def solution():
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
| 78
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """beit"""
    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ) -> Any:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
    @property
    def atol_for_validation(self) -> float:
return 1E-4
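# Minimal usage sketch (added; assumes the class names defined above):
#   config = BeitConfig(image_size=384)   # override a single field, rest keep defaults
#   onnx_config = BeitOnnxConfig(config)
#   list(onnx_config.inputs)              # -> ['pixel_values']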
| 78
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
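# Note (added): with _LazyModule, importing this package stays cheap; names listed in
# _import_structure are resolved on first attribute access (e.g. XLMRobertaXLModel),
# while the `if TYPE_CHECKING` branch gives static type checkers the real imports.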
| 356
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
SPIECE_UNDERLINE = '''▁'''
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''', '''"''').replace('''\'\'''', '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''', outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def get_special_tokens_mask(self, token_ids_a, token_ids_b=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True)
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 173
| 0
|
'''simple docstring'''
import requests
giphy_api_key = 'YOUR API KEY'
def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    formatted_query = '''+'''.join(query.split())
    url = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url).json()['''data''']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
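# Added note: replace giphy_api_key with a real Giphy key before running; each entry of
# the JSON 'data' field carries a 'url' key, so the call above prints one GIF URL per line.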
| 319
|
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
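if __name__ == "__main__":
    # Added usage example: sorted([2, 36, 5, 7, 55, 14]) == [2, 5, 7, 14, 36, 55],
    # so index 3 (len // 2) is the upper median.
    print(quick_select([2, 36, 5, 7, 55, 14], 3))  # 14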
| 193
| 0
|
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1E-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1
        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 86
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
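    # Added note on the windowing above: with look_back = 10 and forward_days = 5,
    # sample i uses rows i .. i+9 as input and rows i+10 .. i+14 as the prediction target.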
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 86
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
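# Illustrative sanity check of the two helpers above (the ids are placeholders,
# not real vocabulary ids; this is my gloss, not part of the original file):
#
#   ids_a = [7, 8, 9]
#   ids_b = [10, 11]
#   tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
#   -> [CLS] + ids_a + [SEP] + ids_b + [SEP]
#   tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
#   -> [0, 0, 0, 0, 0, 1, 1, 1]  # five 0s for [CLS] A [SEP], three 1s for B [SEP]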
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
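# A hypothetical spot check (not part of the original script): given the rules
# above, one would expect mappings along the lines of
#   module.encoder.patch_embed1.proj.weight -> glpn.encoder.patch_embeddings.0.proj.weight
#   module.decoder.fusion2.conv.weight      -> decoder.stages.2.fusion.convolutional_layer.weight
# Running `rename_keys` on a real checkpoint is the authoritative check.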
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
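# Shape intuition for the split above (my sketch, not from the original repo):
# the fused `kv` projection has weight shape (2 * hidden_size, hidden_size), so
# rows [:hidden_size] become the key projection and rows [hidden_size:] the
# value projection, mirroring how the original implementation packed them.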
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
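# Example invocation (paths are placeholders, not taken from the original
# repository's documentation):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti \
#       --push_to_hub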
def solution():
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
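# An equivalent, lighter-weight variant (my addition, not part of the original
# solution): modular exponentiation keeps only the last ten digits at every
# step, so the running total never grows into a huge integer.
def solution_mod(modulus: int = 10**10) -> str:
    return str(sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus).zfill(10)
# e.g. solution_mod() == solution() should hold.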
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
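# Minimal usage sketch (assumes Pillow is installed and `cats.png` exists;
# both are assumptions, not guaranteed by this module):
#
#   from PIL import Image
#   processor = BlipImageProcessor()
#   inputs = processor(images=Image.open("cats.png"), return_tensors="np")
#   inputs["pixel_values"].shape  # -> (1, 3, 384, 384) with the defaults above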
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
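# Worked example (illustrative): for labels_dense = numpy.array([0, 2]) and
# num_classes = 3, index_offset is [0, 3], so flat positions 0 and 5 are set:
#   [[1, 0, 0],
#    [0, 0, 1]]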
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
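# Minimal usage sketch (this API is deprecated; the directory below is a
# placeholder, not from the original module):
#
#   data = read_data_sets("/tmp/mnist_data", one_hot=True)
#   batch_xs, batch_ys = data.train.next_batch(100)
#   batch_xs.shape  # -> (100, 784) because reshape=True flattens the images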
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs):
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
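# Minimal usage sketch (the model name is illustrative, treat it as an
# assumption rather than this pipeline's documented default):
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-cased")
#   features = extractor("We are very happy.", return_tensors=False)
#   # `features` is a nested list of shape [1, seq_len, hidden_size]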
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
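# Minimal usage sketch (assumes `timm`, `torch`, and the "resnet18" weights are
# available locally; the model name is illustrative):
#
#   import torch
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   outputs = backbone(torch.randn(1, 3, 224, 224))
#   [fm.shape for fm in outputs.feature_maps]  # final-stage feature map by default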
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
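# Quick numeric intuition (illustrative): for unit vectors v0 = [1, 0] and
# v1 = [0, 1], the dot product is 0, so theta_0 = pi/2 and
# slerp(0.5, v0, v1) ~= [sin(pi/4), sin(pi/4)] ~= [0.707, 0.707],
# i.e. the midpoint along the arc, which keeps unit norm (a plain lerp would not).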
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t**0.5 * noise_pred) / alpha_prod_t**0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(self, content_image, style_image, content_prompt=None, style_prompt=None, height=512, width=512, noise_strength=0.6, num_inference_steps=50, guidance_scale=7.5, batch_size=1, eta=0.0, clip_guidance_scale=100, generator=None, output_type="pil", return_dict=True, slerp_latent_style_strength=0.8, slerp_prompt_style_strength=0.1, slerp_clip_image_style_strength=0.1):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)
        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)
        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
        style_text_input = self.tokenizer(
            style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )
        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )
        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale,
                    )
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
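# Minimal usage sketch (the component wiring is an assumption; the pipeline
# also runs without CoCa when both prompts are given explicitly):
#
#   pipe = CLIPGuidedImagesMixingStableDiffusion(
#       vae=vae, text_encoder=text_encoder, clip_model=clip_model,
#       tokenizer=tokenizer, unet=unet, scheduler=scheduler,
#       feature_extractor=feature_extractor,
#   )
#   out = pipe(content_image, style_image,
#              content_prompt="a photo of a cat",
#              style_prompt="an oil painting",
#              num_inference_steps=50)
#   out.images[0].save("mixed.png")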
'''simple docstring'''
INSTALL_CONTENT = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
UpperCamelCase : int = KandinskyInpaintPipeline
UpperCamelCase : Optional[Any] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
UpperCamelCase : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
UpperCamelCase : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
UpperCamelCase : Tuple = False
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
return 100
@property
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE_: List[str] =MultilingualCLIP(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =text_encoder.eval()
return text_encoder
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
SCREAMING_SNAKE_CASE_: str =UNetaDConditionModel(**lowerCAmelCase )
return model
@property
def lowerCamelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[str] =VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Optional[Any] =self.dummy_tokenizer
SCREAMING_SNAKE_CASE_: List[str] =self.dummy_unet
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.dummy_movq
SCREAMING_SNAKE_CASE_: int =DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str]=0 ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase )
# create init_image
SCREAMING_SNAKE_CASE_: List[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_: List[str] =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
SCREAMING_SNAKE_CASE_: Dict =np.ones((64, 64) , dtype=np.floataa )
SCREAMING_SNAKE_CASE_: Optional[Any] =0
if str(lowerCAmelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE_: Optional[int] =torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict ="""cpu"""
SCREAMING_SNAKE_CASE_: List[Any] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Optional[int] =self.pipeline_class(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: int =output.images
SCREAMING_SNAKE_CASE_: Optional[int] =pipe(
**self.get_dummy_inputs(lowerCAmelCase ) , return_dict=lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_: Tuple =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: List[Any] =np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__A = logging.get_logger(__name__)
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
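# Usage sketch for the deprecation shim above (illustrative, not part of this
# module): the alias still constructs a working image processor but emits a
# FutureWarning, so new code should instantiate the processor directly:
#
#   from transformers import VideoMAEImageProcessor
#   processor = VideoMAEImageProcessor()  # preferred; no deprecation warning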
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__A)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs)
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
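# A minimal composition sketch for the config class above (the sub-model types
# "dpr" and "bart" are illustrative assumptions, not requirements of this file):
#
#   from transformers import AutoConfig
#   question_encoder_config = AutoConfig.for_model("dpr")
#   generator_config = AutoConfig.for_model("bart")
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       question_encoder_config, generator_config, n_docs=5
#   )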
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Tuple = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def __lowerCAmelCase ():
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
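# Behavioral note (sketch): with the _LazyModule registration above,
# `import transformers.models.fnet` stays cheap; heavy torch-backed symbols
# such as `FNetModel` are only materialized on first attribute access, e.g.:
#
#   from transformers.models.fnet import FNetConfig  # triggers the real import lazily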
"""simple docstring"""
import math
def A ( snake_case :int ) -> int:
if not isinstance(snake_case , snake_case ):
__UpperCamelCase = f'Input value of [number={number}] must be an integer'
raise TypeError(snake_case )
if number < 1:
__UpperCamelCase = f'Input value of [number={number}] must be > 0'
raise ValueError(snake_case )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
for block in range(1 , snake_case ):
for _ in range(snake_case ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
UpperCamelCase : Optional[int] = 0
try:
UpperCamelCase : Any = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
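# Hand-computed sanity check for the block construction above: the Proth
# numbers begin 3, 5, 9, 13, 17, 25, 33, 41, ..., so proth(4) == 13 and
# proth(6) == 25.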
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def A ( snake_case :str , snake_case :str = "cpu" , snake_case :Union[str, None] = None ) -> None:
__UpperCamelCase = torch.load(snake_case , map_location=snake_case )
for k, v in tqdm(state_dict.items() ):
if not isinstance(snake_case , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
__UpperCamelCase = v.half()
if save_path is None: # overwrite src_path
__UpperCamelCase = src_path
torch.save(snake_case , snake_case )
if __name__ == "__main__":
fire.Fire(convert)
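# Example CLI invocation via fire (script and file names are illustrative):
#
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Omitting --save_path overwrites the source checkpoint in place.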
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the fewest perfect squares that sum to `number`, via dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
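# Worked example for the DP above: for number = 12 the best decomposition is
# 4 + 4 + 4, so minimum_squares_to_represent_a_number(12) == 3, beating the
# naive 9 + 1 + 1 + 1 (four squares).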
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Vector pointing from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three 3D points are collinear iff the cross product AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
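# Quick hand-verifiable check: three points on the line x = y = z are
# collinear, so are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)) returns True,
# while are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3)) returns False
# (AB x AC = (1, -2, 1) != (0, 0, 0)).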
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return "\n".join(
f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
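# Standalone usage sketch mirroring the tests above (the tiny checkpoint keeps
# runtime negligible; argument values are the same as in the tests):
#
#   args = TensorFlowBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = TensorFlowBenchmark(args).run()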
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
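# Trace of the three-pointer merge above: the sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, ..., so ugly_numbers(10) == 12.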
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:16, :16] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f"expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f"expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the protein's amino-acid types."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray] = None, chain_index: Optional[np.ndarray] = None, remark: Optional[str] = None, parents: Optional[Sequence[str]] = None, parents_chain_index: Optional[Sequence[int]] = None) -> Protein:
    """Assembles a `Protein` from model prediction outputs."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
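# A minimal round-trip sketch (assumes `features` and `result` dictionaries in
# the shapes documented above, e.g. taken from a structure-prediction model):
#
#   protein = from_prediction(features, result)
#   with open("prediction.pdb", "w") as f:
#       f.write(to_pdb(protein))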
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size)

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
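    # For example, with the tester defaults above (batch_size=2, text_seq_length=7), a
    # token-classification model receives zero-valued labels of shape (2, 7), while a
    # sequence-classification model receives a length-2 label vector.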
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
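                # Illustration (hypothetical signature): if model.call is
                # call(self, input_ids, bbox, attention_mask, ..., labels=None, training=False),
                # the mapping above drops prepared_for_class["labels"] into the positional slot
                # of "labels", leaves every other slot at its signature default, and
                # tuple_input[:-1] strips the trailing argument before the positional call.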
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
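
# A minimal standalone sketch of the same forward pass outside the unittest harness,
# assuming the classes above are importable under these names and that the COCO
# fixture image exists at the path used by prepare_img():
#
#     processor = LayoutLMvaImageProcessor(apply_ocr=False)
#     model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
#     pixel_values = processor(images=prepare_img(), return_tensors="tf").pixel_values
#     input_ids = tf.constant([[1, 2]])
#     bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
#     outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
#     print(outputs.last_hidden_state.shape)  # expected: (1, 199, 768)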
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
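    # For illustration: size={"shortest_edge": 224} rescales the short side to 224 and
    # keeps the aspect ratio (get_resize_output_image_size with default_to_square=False),
    # while size={"height": 224, "width": 224} resizes to exactly 224x224 regardless of
    # the input's aspect ratio.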
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
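
# Minimal usage sketch for this processor (hypothetical input; assumes Pillow and
# NumPy are installed alongside this module):
#
#     from PIL import Image
#     import numpy as np
#
#     processor = EfficientFormerImageProcessor()
#     image = Image.fromarray(np.zeros((256, 320, 3), dtype=np.uint8))
#     batch = processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resize + center-crop to
#                                         # 224x224, channels-first by default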