| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string, 86 – 54.5k chars | int64, 0 – 371 | string, 87 – 49.2k chars | int64, 0 – 349 | int64, 0 – 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs by adding special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Create token type IDs: 0 for the first sequence (and its special tokens), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
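
# Usage sketch (not part of the original file; the checkpoint name is taken from the map above):
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   tokenizer("Hello world")["input_ids"]  # -> [CLS] ... [SEP], token_type_ids all 0 for a single sequence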
| code_codestyle: 24 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone)
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| style_context_codestyle: 24 | label: 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 130 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| style_context_codestyle: 130 | label: 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 10 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition a[left_index:right_index] around the pivot stored at a[left_index]."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
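# Note: the randomised pivot above gives expected O(n log n) behaviour and avoids the
# O(n^2) worst case that a fixed left pivot would hit on already-sorted input.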
| style_context_codestyle: 43 | label: 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 300 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # Pretraining models additionally expect a next-sentence label.
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size)

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| style_context_codestyle: 300 | label: 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| code_codestyle: 287 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    # Intentionally shadows the builtin `map`: timed wrapper around Dataset.map.
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    # Intentionally shadows the builtin `filter`: timed wrapper around Dataset.filter.
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| style_context_codestyle: 287 | label: 1 |
def gnome_sort(lst: list) -> list:
    """Sort a list in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
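# Note: gnome sort is O(n^2) in the worst case but O(n) on already-sorted input,
# since the index only steps back when an adjacent pair is out of order.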
| code_codestyle: 148 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| style_context_codestyle: 148 | label: 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 43 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Read YOLO-format label files and return matching image paths and absolute boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # Convert (x_center, y_center, w, h) to corner coordinates.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| style_context_codestyle: 43 | label: 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
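
# Usage sketch (assumed, not part of the original file; any ViLT checkpoint with a processor works):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   inputs = processor(images=image, text="How many cats are there?", return_tensors="pt")
#   # -> input_ids/attention_mask from the tokenizer plus pixel_values/pixel_mask from the image processor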
| code_codestyle: 353 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete Fourier transform of A or B, iterative radix-2 butterfly
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B, then invert to recover A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
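# Example (assumed usage of the class above):
#   print(FFT(poly_a=[1, 2, 3], poly_b=[1, 2]))
# multiplies (1 + 2x + 3x^2) * (1 + 2x) via the radix-2 DFT and prints A, B and A*B.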
| style_context_codestyle: 80 | label: 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| code_codestyle: 178 |
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a ) -> None:
snake_case_ = set_counts
snake_case_ = max(a )
snake_case_ = len(a )
snake_case_ = [1] * num_sets
snake_case_ = list(range(a ) )
def _UpperCamelCase ( self , a , a ) -> bool:
snake_case_ = self.get_parent(a )
snake_case_ = self.get_parent(a )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
snake_case_ = 0
snake_case_ = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
snake_case_ = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
snake_case_ = 0
snake_case_ = src_parent
snake_case_ = self.set_counts[src_parent]
snake_case_ = max(self.max_set , a )
return True
def _UpperCamelCase ( self , a ) -> int:
if self.parents[disj_set] == disj_set:
return disj_set
snake_case_ = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
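# A self-contained sketch of the same union-by-rank structure the class above
# implements: merge two sets, keep a per-root element count, and track the
# largest set seen so far. Names here are illustrative.
def make_dsu(set_counts):
    n = len(set_counts)
    return {"parent": list(range(n)), "rank": [1] * n,
            "count": list(set_counts), "max_set": max(set_counts)}

def find(dsu, x):
    while dsu["parent"][x] != x:
        dsu["parent"][x] = dsu["parent"][dsu["parent"][x]]  # path halving
        x = dsu["parent"][x]
    return x

def merge(dsu, a, b):
    ra, rb = find(dsu, a), find(dsu, b)
    if ra == rb:
        return False
    if dsu["rank"][ra] < dsu["rank"][rb]:
        ra, rb = rb, ra  # attach the shallower tree under the deeper one
    dsu["parent"][rb] = ra
    dsu["count"][ra] += dsu["count"][rb]
    if dsu["rank"][ra] == dsu["rank"][rb]:
        dsu["rank"][ra] += 1
    dsu["max_set"] = max(dsu["max_set"], dsu["count"][ra])
    return True

d = make_dsu([1, 1, 1])
merge(d, 0, 1)
merge(d, 1, 2)
assert d["max_set"] == 3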
| 178
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowercase :
"""simple docstring"""
_a = XGLMConfig
_a = {}
_a = """gelu"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=14 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=0.02 , ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Optional[Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :Any = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :Optional[int] = d_model
UpperCamelCase__ :Union[str, Any] = num_hidden_layers
UpperCamelCase__ :str = num_attention_heads
UpperCamelCase__ :int = ffn_dim
UpperCamelCase__ :Optional[Any] = activation_function
UpperCamelCase__ :Optional[int] = activation_dropout
UpperCamelCase__ :Any = attention_dropout
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Union[str, Any] = initializer_range
UpperCamelCase__ :List[Any] = None
UpperCamelCase__ :Optional[int] = 0
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :Any = 1
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :int = self.get_config()
UpperCamelCase__ :str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__lowerCamelCase , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.prepare_config_and_inputs()
(
UpperCamelCase__
) :Optional[Any] = config_and_inputs
UpperCamelCase__ :Union[str, Any] = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class lowercase ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_a = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_a = (TFXGLMForCausalLM,) if is_tf_available() else ()
_a = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_a = False
_a = False
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = TFXGLMModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Optional[int] = TFXGLMModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self , UpperCamelCase_=True ):
'''simple docstring'''
UpperCamelCase__ :Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase__ :str = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase__ :Optional[Any] = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
UpperCamelCase__ :str = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __lowerCamelCase )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase__ :Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
UpperCamelCase__ :List[str] = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
UpperCamelCase__ :Optional[Any] = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
UpperCamelCase__ :Any = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , seed=[7, 0] )
UpperCamelCase__ :Union[str, Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase__ :Optional[int] = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase__ :List[str] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase__ :Tuple = """left"""
# use different length sentences to test batching
UpperCamelCase__ :List[str] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
UpperCamelCase__ :str = tokenizer(__lowerCamelCase , return_tensors='''tf''' , padding=__lowerCamelCase )
UpperCamelCase__ :Optional[int] = inputs["""input_ids"""]
UpperCamelCase__ :Union[str, Any] = model.generate(input_ids=__lowerCamelCase , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
UpperCamelCase__ :Optional[int] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
UpperCamelCase__ :Dict = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 )
UpperCamelCase__ :List[str] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
UpperCamelCase__ :int = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 )
UpperCamelCase__ :Any = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
UpperCamelCase__ :Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase__ :List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase__ :str = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
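# The batched-generation test above depends on left padding: a decoder-only
# model continues from the last position, so shorter prompts are padded on
# the left and masked out. A toy sketch of that batching step (pad id 0 is
# illustrative):
def left_pad(batch, pad_id=0):
    width = max(len(seq) for seq in batch)
    input_ids = [[pad_id] * (width - len(seq)) + seq for seq in batch]
    attention_mask = [[0] * (width - len(seq)) + [1] * len(seq) for seq in batch]
    return input_ids, attention_mask

ids, mask = left_pad([[5, 6, 7, 8], [9, 10]])
assert ids == [[5, 6, 7, 8], [0, 0, 9, 10]]
assert mask == [[1, 1, 1, 1], [0, 0, 1, 1]]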
| 368
|
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.txt'''}
__snake_case = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
__snake_case = {
'''openbmb/cpm-ant-10b''': 1024,
}
def a ( __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :List[str] = collections.OrderedDict()
with open(__a , '''r''' , encoding='''utf-8''' ) as reader:
UpperCamelCase__ :Dict = reader.readlines()
for index, token in enumerate(__a ):
UpperCamelCase__ :str = token.rstrip('''\n''' )
UpperCamelCase__ :Optional[int] = index
return vocab
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<unk>" , UpperCamelCase_=200 ):
'''simple docstring'''
UpperCamelCase__ :Tuple = vocab
UpperCamelCase__ :List[str] = unk_token
UpperCamelCase__ :Tuple = max_input_chars_per_word
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = list(UpperCamelCase_ )
if len(UpperCamelCase_ ) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase__ :List[Any] = 0
UpperCamelCase__ :str = []
while start < len(UpperCamelCase_ ):
UpperCamelCase__ :int = len(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = None
while start < end:
UpperCamelCase__ :int = ''''''.join(chars[start:end] )
if substr in self.vocab:
UpperCamelCase__ :List[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(UpperCamelCase_ )
UpperCamelCase__ :Any = end
return sub_tokens
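# The tokenizer above is greedy longest-match-first: at each position it takes
# the longest substring still in the vocabulary, otherwise emits the unknown
# token and advances one character. A self-contained sketch of that loop
# (function name and vocab below are illustrative):
def greedy_longest_match(text, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1
        if end == start:  # nothing matched: emit <unk>, move past one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens

assert greedy_longest_match("unhappy", {"un", "happy", "h"}) == ["un", "happy"]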
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
_a = False
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<d>" , UpperCamelCase_="</d>" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<unk>" , UpperCamelCase_="</n>" , UpperCamelCase_="</_>" , UpperCamelCase_="left" , **UpperCamelCase_ , ):
'''simple docstring'''
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=UpperCamelCase_ , eod_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , line_token=UpperCamelCase_ , space_token=UpperCamelCase_ , padding_side=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Tuple = bod_token
UpperCamelCase__ :Dict = eod_token
UpperCamelCase__ :Optional[int] = load_vocab(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.encoder[space_token]
UpperCamelCase__ :List[Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase__ :Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCamelCase_ : x[1] ) )
UpperCamelCase__ :Union[str, Any] = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ :List[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.encoder["\n"]
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = []
for x in jieba.cut(UpperCamelCase_ , cut_all=UpperCamelCase_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCamelCase_ ) )
return output_tokens
def lowerCAmelCase__ ( self , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [i for i in token_ids if i >= 0]
UpperCamelCase__ :Optional[int] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return token in self.encoder
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.decoder.get(UpperCamelCase_ , self.unk_token )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if os.path.isdir(UpperCamelCase_ ):
UpperCamelCase__ :int = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCamelCase__ :str = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase__ :Any = 0
if " " in self.encoder:
UpperCamelCase__ :Dict = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase__ :List[str] = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase__ :List[str] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCamelCase_ : x[1] ) )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
UpperCamelCase__ :Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ ))
return [1] + ([0] * len(UpperCamelCase_ ))
| 219
| 0
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self , lowercase=32 ) -> Union[str, Any]:
'''simple docstring'''
set_seed(0 )
A__ = UNetaDModel(sample_size=lowercase , in_channels=3 , out_channels=3 )
A__ = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
A__ = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowercase , )
A__ = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowercase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
A__ = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowercase ) for _ in range(4 )]
A__ = [torch.randn((4, 3, 32, 32) ).to(lowercase ) for _ in range(4 )]
A__ = [torch.randint(0 , 1000 , (4,) ).long().to(lowercase ) for _ in range(4 )]
# train with a DDPM scheduler
A__ , A__ = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
A__ = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
A__ = model(lowercase , timesteps[i] ).sample
A__ = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
A__ , A__ = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
A__ = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
A__ = model(lowercase , timesteps[i] ).sample
A__ = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-5 ) )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-5 ) )
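# Both schedulers above share the forward-noising rule used by add_noise:
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, which is why
# the test can feed identical noisy batches to both training runs. A minimal
# torch sketch under the same linear beta schedule (names are illustrative):
import torch

def add_noise(x0, eps, t, num_steps=1000, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_steps)
    alpha_bar_t = torch.cumprod(1.0 - betas, dim=0)[t]
    return alpha_bar_t.sqrt() * x0 + (1.0 - alpha_bar_t).sqrt() * eps

x0 = torch.randn(4, 3, 32, 32)
eps = torch.randn_like(x0)
noisy = add_noise(x0, eps, t=500)
assert noisy.shape == x0.shape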
| 68
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowercase ( _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
_UpperCAmelCase = args.pruning_method
_UpperCAmelCase = args.threshold
_UpperCAmelCase = args.model_name_or_path.rstrip('''/''' )
_UpperCAmelCase = args.target_model_path
print(f'Load fine-pruned model from {model_name_or_path}' )
_UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) )
_UpperCAmelCase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_UpperCAmelCase = tensor
print(f'Copied layer {name}' )
elif "classifier" in name or "qa_output" in name:
_UpperCAmelCase = tensor
print(f'Copied layer {name}' )
elif "bias" in name:
_UpperCAmelCase = tensor
print(f'Copied layer {name}' )
else:
if pruning_method == "magnitude":
_UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_UpperCAmelCase = name[:-6]
_UpperCAmelCase = model[f'{prefix_}mask_scores']
_UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_UpperCAmelCase = name[:-6]
_UpperCAmelCase = model[f'{prefix_}mask_scores']
_UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_UpperCAmelCase = name[:-6]
_UpperCAmelCase = model[f'{prefix_}mask_scores']
_UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1
_UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = s * (r - l) + l
_UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 )
_UpperCAmelCase = tensor * mask
print(f'Pruned layer {name}' )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
_UpperCAmelCase = os.path.join(
os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f'\nCreated folder {target_model_path}' )
torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`."
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
__A : Optional[int] = parser.parse_args()
main(args)
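# Every branch above reduces to "compute a 0/1 mask, multiply the weight".
# A minimal torch sketch of the magnitude case, where `keep_fraction` (an
# illustrative stand-in for the script's threshold) is the fraction of
# weights to keep by absolute value:
import torch

def magnitude_mask(weight, keep_fraction):
    k = int(keep_fraction * weight.numel())
    # value of the k-th largest |w|: everything at or above it survives
    cutoff = weight.abs().flatten().kthvalue(weight.numel() - k + 1).values
    return (weight.abs() >= cutoff).to(weight.dtype)

w = torch.arange(64, dtype=torch.float32).reshape(8, 8) - 31.5
pruned = w * magnitude_mask(w, keep_fraction=0.25)
assert int((pruned != 0).sum()) == 16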
| 260
| 0
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
__lowerCamelCase : Optional[Any] = """src/transformers"""
__lowerCamelCase : Optional[Any] = """docs/source/en/tasks"""
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
with open(_lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase : str = f.readlines()
# Find the start prompt.
UpperCamelCase : int = 0
while not lines[start_index].startswith(_lowerCAmelCase ):
start_index += 1
start_index += 1
UpperCamelCase : int = start_index
while not lines[end_index].startswith(_lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
__lowerCamelCase : Tuple = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__lowerCamelCase : List[Any] = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def A_ ( _lowerCAmelCase ) -> Optional[int]:
UpperCamelCase : Tuple = TASK_GUIDE_TO_MODELS[task_guide]
UpperCamelCase : Dict = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_lowerCAmelCase , set() )
UpperCamelCase : str = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False ) -> List[Any]:
UpperCamelCase : int = _find_text_in_file(
filename=os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
UpperCamelCase : str = get_model_list_for_task(_lowerCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
" to fix this." )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCamelCase : Union[str, Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
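# The helper above locates the block between a start and an end prompt so the
# generated model list can be spliced back in place. A minimal sketch of the
# same find-and-splice idea (all names illustrative):
def splice_between(lines, start_prompt, end_prompt, new_block):
    start = next(i for i, line in enumerate(lines) if line.startswith(start_prompt))
    end = next(i for i, line in enumerate(lines) if i > start and line.startswith(end_prompt))
    return lines[: start + 1] + [new_block] + lines[end:]

doc = ["intro", "<!--start-->", "old list", "<!--end-->", "outro"]
assert splice_between(doc, "<!--start-->", "<!--end-->", "new list") == [
    "intro", "<!--start-->", "new list", "<!--end-->", "outro"
]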
| 366
|
__lowerCamelCase : Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def A_ ( _lowerCAmelCase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase : str = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(_lowerCAmelCase )
    UpperCamelCase : Optional[int] = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
UpperCamelCase : str = len(_lowerCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCamelCase : Optional[Any] = b"=" * ((6 - len(_lowerCAmelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowerCAmelCase ) % 6)
else:
UpperCamelCase : List[Any] = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowerCAmelCase ) , 6 ) ).encode()
+ padding
)
def A_ ( _lowerCAmelCase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase : List[Any] = (
"argument should be a bytes-like object or ASCII string, "
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(_lowerCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
UpperCamelCase : Any = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
UpperCamelCase : Union[str, Any] = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCamelCase : List[str] = encoded_data[:-padding]
        UpperCamelCase : List[Any] = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
        UpperCamelCase : List[Any] = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
UpperCamelCase : Any = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowerCAmelCase ) , 8 )
]
return bytes(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
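# A quick round-trip sanity check of the scheme implemented above, done here
# with the standard library for comparison (the expected string was computed
# by hand from the 6-bit table):
import base64

data = b"Hello, base64!"
encoded = base64.b64encode(data)
assert encoded == b"SGVsbG8sIGJhc2U2NCE="
assert base64.b64decode(encoded) == data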
| 140
| 0
|
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class A__ :
def __init__( self : int , _a : Optional[int] , _a : Optional[Any]=13 , _a : List[Any]=7 , _a : List[Any]=True , _a : str=True , _a : Dict=False , _a : str=True , _a : List[str]=99 , _a : str=64 , _a : Union[str, Any]=5 , _a : List[Any]=4 , _a : Optional[Any]=64 , _a : Tuple="gelu" , _a : List[str]=0.1 , _a : str=0.1 , _a : int=512 , _a : Dict=16 , _a : List[str]=2 , _a : int=0.02 , _a : Union[str, Any]=3 , _a : str=4 , _a : Tuple=None , ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =seq_length
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_input_mask
_SCREAMING_SNAKE_CASE =use_token_type_ids
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =type_vocab_size
_SCREAMING_SNAKE_CASE =type_sequence_label_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =num_choices
_SCREAMING_SNAKE_CASE =scope
def A ( self : str ) -> Tuple:
'''simple docstring'''
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def A ( self : int ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Dict ) -> Dict:
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A ( self : Tuple , _a : Dict , _a : Dict , _a : Dict , _a : int , _a : Optional[int] , _a : Any ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =MPNetModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , _a )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : Union[str, Any] , _a : List[Any] , _a : Dict , _a : List[Any] , _a : Any , _a : List[Any] , _a : Dict ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =MPNetForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(
_a , attention_mask=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Tuple , _a : Any , _a : List[Any] , _a : List[Any] , _a : Dict , _a : List[str] , _a : List[Any] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =MPNetForSequenceClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , _a : int , _a : Optional[Any] , _a : List[Any] , _a : Tuple , _a : Tuple , _a : int ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_choices
_SCREAMING_SNAKE_CASE =MPNetForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE =model(
_a , attention_mask=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : Any , _a : int , _a : Optional[Any] , _a : Optional[Any] , _a : str , _a : List[Any] , _a : Any ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =MPNetForTokenClassification(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Any ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =config_and_inputs
_SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
A__ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
A__ = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = False
A__ = True
def A ( self : int ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =MPNetModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 )
def A ( self : List[Any] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Any ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_a )
def A ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_a )
def A ( self : Optional[int] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_a )
def A ( self : Any ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_a )
def A ( self : str ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_a )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def A ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =MPNetModel.from_pretrained('microsoft/mpnet-base' )
_SCREAMING_SNAKE_CASE =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_SCREAMING_SNAKE_CASE =model(_a )[0]
_SCREAMING_SNAKE_CASE =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 47
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCamelCase : Optional[int] = False
class A__ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def A ( self : Tuple ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =pipe(
image=_a , generator=_a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
_SCREAMING_SNAKE_CASE =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_SCREAMING_SNAKE_CASE =np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 47
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__magic_name__: Optional[int] = "gpt_neox"
def __init__( self : Dict , _A : List[str]=50432 , _A : Any=6144 , _A : Optional[Any]=44 , _A : Union[str, Any]=64 , _A : List[str]=24576 , _A : Optional[int]="gelu" , _A : Dict=0.2_5 , _A : Tuple=10000 , _A : List[str]=0.0 , _A : int=0.0 , _A : Dict=0.1 , _A : Any=2048 , _A : Any=0.0_2 , _A : List[str]=1E-5 , _A : Any=True , _A : List[Any]=0 , _A : int=2 , _A : str=False , _A : List[str]=True , _A : Optional[Any]=None , **_A : Dict , ) -> int:
"""simple docstring"""
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
snake_case_ : str = vocab_size
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : Tuple = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : str = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Optional[int] = hidden_act
snake_case_ : int = rotary_pct
snake_case_ : str = rotary_emb_base
snake_case_ : List[Any] = attention_dropout
snake_case_ : Union[str, Any] = hidden_dropout
snake_case_ : Dict = classifier_dropout
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Any = use_cache
snake_case_ : List[Any] = tie_word_embeddings
snake_case_ : Tuple = use_parallel_residual
snake_case_ : str = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"""got {self.rope_scaling}""" )
snake_case_ : str = self.rope_scaling.get('type' , _A )
snake_case_ : Optional[int] = self.rope_scaling.get('factor' , _A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
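# A minimal illustration of a `rope_scaling` value that passes the validation
# above: a two-field dict whose type is "linear" or "dynamic" and whose
# factor is a float greater than 1.
rope_scaling = {"type": "linear", "factor": 2.0}
assert rope_scaling["type"] in ["linear", "dynamic"]
assert isinstance(rope_scaling["factor"], float) and rope_scaling["factor"] > 1.0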
| 88
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class SCREAMING_SNAKE_CASE_ :
__magic_name__: int = MBartConfig
__magic_name__: str = {}
__magic_name__: Union[str, Any] = "gelu"
def __init__( self : List[str] , _A : Optional[int] , _A : List[Any]=13 , _A : List[Any]=7 , _A : Dict=True , _A : Tuple=False , _A : Optional[Any]=99 , _A : Dict=32 , _A : str=2 , _A : str=4 , _A : Tuple=37 , _A : Tuple=0.1 , _A : Union[str, Any]=0.1 , _A : Optional[int]=20 , _A : Dict=2 , _A : List[str]=1 , _A : Union[str, Any]=0 , ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[str] = seq_length
snake_case_ : Union[str, Any] = is_training
snake_case_ : Optional[int] = use_labels
snake_case_ : Dict = vocab_size
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : Optional[Any] = eos_token_id
snake_case_ : Tuple = pad_token_id
snake_case_ : int = bos_token_id
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ : Union[str, Any] = prepare_mbart_inputs_dict(_A , _A , _A )
return config, inputs_dict
def UpperCAmelCase_ ( self : Optional[Any] , _A : Optional[Any] , _A : int ) -> str:
"""simple docstring"""
snake_case_ : Dict = TFMBartModel(config=_A ).get_decoder()
snake_case_ : Any = inputs_dict['input_ids']
snake_case_ : List[Any] = input_ids[:1, :]
snake_case_ : Dict = inputs_dict['attention_mask'][:1, :]
snake_case_ : Tuple = inputs_dict['head_mask']
snake_case_ : List[Any] = 1
# first forward pass
snake_case_ : Any = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
snake_case_ ,snake_case_ : str = outputs.to_tuple()
snake_case_ : int = past_key_values[1]
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a=None , __a=None , __a=None , __a=None , __a=None , ):
if attention_mask is None:
snake_case_ : Optional[int] = tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case_ : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case_ : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case_ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ):
__magic_name__: Tuple = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__magic_name__: int = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__magic_name__: Union[str, Any] = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__magic_name__: Tuple = True
__magic_name__: Tuple = False
__magic_name__: Any = False
def UpperCAmelCase_ ( self : Any , _A : Union[str, Any] , _A : List[Any] , _A : str , _A : int , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = TFMBartModelTester(self )
snake_case_ : List[Any] = ConfigTester(self , config_class=_A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
__magic_name__: Optional[int] = [
" UN Chief Says There Is No Military Solution in Syria",
]
__magic_name__: Union[str, Any] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
__magic_name__: List[Any] = "facebook/mbart-large-en-ro"
@cached_property
def UpperCAmelCase_ ( self : str ) -> List[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
"""simple docstring"""
snake_case_ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCAmelCase_ ( self : Optional[int] , **_A : str ) -> int:
"""simple docstring"""
snake_case_ : List[str] = self.translate_src_text(**_A )
self.assertListEqual(self.expected_text , _A )
def UpperCAmelCase_ ( self : Union[str, Any] , **_A : Dict ) -> int:
"""simple docstring"""
snake_case_ : Optional[Any] = self.tokenizer(self.src_text , **_A , return_tensors='tf' )
snake_case_ : int = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
snake_case_ : Any = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
return generated_words
@slow
def UpperCAmelCase_ ( self : str ) -> List[str]:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 88
| 1
|
"""simple docstring"""
def _snake_case ( UpperCamelCase : dict ):
UpperCAmelCase : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
UpperCAmelCase : set[int] = set()
return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
for node in graph )
def _snake_case ( UpperCamelCase : dict , UpperCamelCase : int , UpperCamelCase : set , UpperCamelCase : set ):
    visited.add(vertex )
    rec_stk.add(vertex )
for node in graph[vertex]:
if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
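# A self-contained illustration of the adjacency-list input the checker above
# expects; a back edge to a vertex still on the recursion stack signals a
# cycle (names here are illustrative):
def has_cycle(graph):
    visited, stack = set(), set()

    def dfs(v):
        visited.add(v)
        stack.add(v)
        for w in graph[v]:
            if w not in visited and dfs(w):
                return True
            if w in stack:
                return True
        stack.remove(v)
        return False

    return any(v not in visited and dfs(v) for v in graph)

assert has_cycle({0: [1], 1: [2], 2: [0]}) is True
assert has_cycle({0: [1], 1: [2], 2: []}) is False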
| 109
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
a__ = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
a__ = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = """ Hello world! cécé herlolip"""
a__ = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
_snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
_snake_case : Optional[int] = dct.pop(SCREAMING_SNAKE_CASE__ )
_snake_case : int = val
def lowercase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
_snake_case : List[Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
_snake_case : int = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
_snake_case , _snake_case : List[str] = emb.weight.shape
_snake_case : Any = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = emb.weight.data
return lin_layer
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=None ) -> List[str]:
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
_snake_case : List[str] = torch.hub.load("""pytorch/fairseq""" , SCREAMING_SNAKE_CASE__ ).eval()
else:
_snake_case : Union[str, Any] = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
_snake_case : Optional[Any] = checkpoint_path.replace(""".""" , """-""" )
_snake_case : Optional[Any] = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
_snake_case : str = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all():
raise ValueError(
F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
_snake_case : Dict = bart.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
_snake_case : str = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = bart.predict("""mnli""" , SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE__ )[0] # logits
else: # no classification heads to worry about
_snake_case : Dict = bart.model.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = state_dict["""decoder.embed_tokens.weight"""]
_snake_case : Optional[Any] = bart.extract_features(SCREAMING_SNAKE_CASE__ )
if hf_checkpoint_name == "facebook/bart-large":
_snake_case : Optional[Any] = BartModel(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ).model[0]
else:
_snake_case : str = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(SCREAMING_SNAKE_CASE__ )
if hasattr(SCREAMING_SNAKE_CASE__ , """lm_head""" ):
_snake_case : Any = make_linear_from_emb(model.model.shared )
_snake_case : Optional[Any] = model.model(SCREAMING_SNAKE_CASE__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
a__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 317
| 0
|
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case ( main_process_only = True ,*args ,**kwargs ):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
    disable = False
    if main_process_only:
        # draw the bar only on local rank 0; every other process gets disable=True
        disable = PartialState().local_process_index != 0
    return _tqdm(*args ,**kwargs ,disable=disable )
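# Added usage sketch (hedged): with the default main_process_only=True the
# wrapper above disables the bar on every local rank except 0, so a
# multi-process run prints one bar per node. In released `accelerate` this
# helper is exposed as `accelerate.utils.tqdm`; here the local definition is
# called directly, with the flag passed before the wrapped iterable.
def _tqdm_usage_sketch(iterable):
    for _item in snake_case(True, iterable, desc="progress"):
        pass  # only local rank 0 renders the bar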
| 253
|
"""simple docstring"""
from math import factorial
def snake_case ( A__ = 1_00 ):
    return sum(int(x ) for x in str(factorial(A__ ) ) )
if __name__ == "__main__":
    print(snake_case(int(input('''Enter the Number: ''').strip())))
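# Added worked example (not in the original): 10! = 3628800, whose digits sum
# to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, and the default n = 100 gives 648, the
# known answer to Project Euler problem 20.
assert snake_case(10) == 27
assert snake_case() == 648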
| 253
| 1
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase ( lowercase_ ):
lowercase = 'new-model'
if is_tf_available():
class UpperCamelCase ( lowercase_ ):
lowercase = NewModelConfig
@require_tf
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[int] = 'bert-base-cased'
lowercase_ : List[str] = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = TFAutoModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Dict = 'bert-base-cased'
lowercase_ : Optional[Any] = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Optional[Any] = TFAutoModelForPreTraining.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Dict = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained(__UpperCamelCase )
lowercase_ , lowercase_ : Dict = TFAutoModelForCausalLM.from_pretrained(__UpperCamelCase ,output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = TFAutoModelWithLMHead.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = TFAutoModelForMaskedLM.from_pretrained(__UpperCamelCase )
lowercase_ , lowercase_ : Any = TFAutoModelForMaskedLM.from_pretrained(__UpperCamelCase ,output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase )
lowercase_ , lowercase_ : int = TFAutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase ,output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase_ : Dict = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase_ : str = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
@slow
@require_tensorflow_probability
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : int = TFAutoModelForTableQuestionAnswering.from_pretrained(__UpperCamelCase )
lowercase_ , lowercase_ : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(
__UpperCamelCase ,output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
        lowercase_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
self.assertEqual(model.num_parameters() ,1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase ) ,1_4410 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
        lowercase_ : str = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
self.assertEqual(model.num_parameters() ,1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase ) ,1_4410 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Optional[int] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Union[str, Any] = copy.deepcopy(model.config )
lowercase_ : Tuple = ['FunnelBaseModel']
lowercase_ : Tuple = TFAutoModel.from_config(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = TFAutoModel.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
try:
            AutoConfig.register('new-model' ,NewModelConfig )
lowercase_ : int = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__UpperCamelCase ):
auto_class.register(__UpperCamelCase ,__UpperCamelCase )
auto_class.register(__UpperCamelCase ,__UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
auto_class.register(__UpperCamelCase ,__UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase_ : Union[str, Any] = BertModelTester(self ).get_config()
lowercase_ : List[Any] = NewModelConfig(**tiny_config.to_dict() )
lowercase_ : str = auto_class.from_config(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = auto_class.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase ,'bert-base is not a local folder and is not a valid model identifier' ):
lowercase_ : Union[str, Any] = TFAutoModel.from_pretrained('bert-base' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase ,r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            lowercase_ : Dict = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,revision='aaaaaa' )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase ,'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' ,):
lowercase_ : Union[str, Any] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(__UpperCamelCase ,'Use `from_pt=True` to load this model' ):
lowercase_ : str = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Any = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowercase_ : str = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
lowercase_ : List[str] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
lowercase_ : Dict = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
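# Added sketch (hedged): the registration flow exercised by the test above,
# written out standalone. `AutoConfig.register` and `TFAutoModel.register` are
# the public transformers APIs; the two classes below are illustrative
# stand-ins for a user-defined config/model pair.
if is_tf_available():

    class _SketchConfig(BertConfig):
        model_type = "sketch-model"  # hypothetical model type

    class _TFSketchModel(TFBertModel):
        config_class = _SketchConfig

    def _register_sketch():
        AutoConfig.register("sketch-model", _SketchConfig)
        TFAutoModel.register(_SketchConfig, _TFSketchModel)
        # the auto-API now dispatches to the custom class
        return TFAutoModel.from_config(_SketchConfig())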
| 213
|
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : list ):
    if not __SCREAMING_SNAKE_CASE:
raise ValueError('List is empty' )
return sum(__SCREAMING_SNAKE_CASE ) / len(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
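# Added usage note (grounded in the function above): it returns the arithmetic
# mean and rejects empty input.
assert lowercase__([2, 4, 6]) == 4.0
try:
    lowercase__([])
    raise AssertionError("expected ValueError for an empty list")
except ValueError:
    pass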
| 213
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[str] = logging.get_logger(__name__)
def _lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str]=False ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_SCREAMING_SNAKE_CASE =[(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def _lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=False ) -> Tuple:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_SCREAMING_SNAKE_CASE =''
else:
_SCREAMING_SNAKE_CASE ='deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE =state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
_SCREAMING_SNAKE_CASE =state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE =in_proj_weight[
: config.hidden_size, :
]
_SCREAMING_SNAKE_CASE =in_proj_bias[: config.hidden_size]
_SCREAMING_SNAKE_CASE =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_SCREAMING_SNAKE_CASE =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_SCREAMING_SNAKE_CASE =in_proj_weight[
-config.hidden_size :, :
]
_SCREAMING_SNAKE_CASE =in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =dct.pop(A__ )
_SCREAMING_SNAKE_CASE =val
def _lowerCAmelCase ( ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='http://images.cocodataset.org/val2017/000000039769.jpg'
_SCREAMING_SNAKE_CASE =Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DeiTConfig()
# all deit models have fine-tuned heads
_SCREAMING_SNAKE_CASE =False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_SCREAMING_SNAKE_CASE =10_00
_SCREAMING_SNAKE_CASE ='huggingface/label-files'
_SCREAMING_SNAKE_CASE ='imagenet-1k-id2label.json'
_SCREAMING_SNAKE_CASE =json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
_SCREAMING_SNAKE_CASE ={int(A__ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE =idalabel
_SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE =int(deit_name[-6:-4] )
_SCREAMING_SNAKE_CASE =int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
_SCREAMING_SNAKE_CASE =1_92
_SCREAMING_SNAKE_CASE =7_68
_SCREAMING_SNAKE_CASE =12
_SCREAMING_SNAKE_CASE =3
elif deit_name[9:].startswith('small' ):
_SCREAMING_SNAKE_CASE =3_84
_SCREAMING_SNAKE_CASE =15_36
_SCREAMING_SNAKE_CASE =12
_SCREAMING_SNAKE_CASE =6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
_SCREAMING_SNAKE_CASE =10_24
_SCREAMING_SNAKE_CASE =40_96
_SCREAMING_SNAKE_CASE =24
_SCREAMING_SNAKE_CASE =16
# load original model from timm
_SCREAMING_SNAKE_CASE =timm.create_model(A__ , pretrained=A__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_SCREAMING_SNAKE_CASE =timm_model.state_dict()
_SCREAMING_SNAKE_CASE =create_rename_keys(A__ , A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_q_k_v(A__ , A__ , A__ )
# load HuggingFace model
_SCREAMING_SNAKE_CASE =DeiTForImageClassificationWithTeacher(A__ ).eval()
model.load_state_dict(A__ )
# Check outputs on an image, prepared by DeiTImageProcessor
_SCREAMING_SNAKE_CASE =int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_SCREAMING_SNAKE_CASE =DeiTImageProcessor(size=A__ , crop_size=config.image_size )
_SCREAMING_SNAKE_CASE =image_processor(images=prepare_img() , return_tensors='pt' )
_SCREAMING_SNAKE_CASE =encoding['pixel_values']
_SCREAMING_SNAKE_CASE =model(A__ )
_SCREAMING_SNAKE_CASE =timm_model(A__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A__ , outputs.logits , atol=1E-3 )
Path(A__ ).mkdir(exist_ok=A__ )
print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(A__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCamelCase : Optional[Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
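# Added sketch (hedged): inference with a converted checkpoint. The dump
# directory is whatever --pytorch_dump_folder_path pointed at (hypothetical
# here); the script saves both the model and the image processor, so
# from_pretrained on that folder is sufficient.
def _run_converted_deit(dump_dir="deit_dump"):
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    processor = DeiTImageProcessor.from_pretrained(dump_dir)
    model = DeiTForImageClassificationWithTeacher.from_pretrained(dump_dir).eval()
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]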
| 358
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase : Optional[int] = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
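# Added sketch (hedged): a minimal standalone version of the lazy-import
# pattern above. Attribute access triggers the real import, so heavy optional
# dependencies load only when actually used; the names below are illustrative,
# not transformers internals.
import importlib
import types


class _LazySketchModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        return getattr(module, attr)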
| 114
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Tuple = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
)
lowerCAmelCase : List[str] = 'CIDAS/clipseg-rd64-refined'
lowerCAmelCase : Optional[int] = 'image_segmenter'
lowerCAmelCase : Union[str, Any] = CLIPSegForImageSegmentation
lowerCAmelCase : str = ['image', 'text']
lowerCAmelCase : int = ['image']
def __init__( self : Tuple ,*_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : Any ):
requires_backends(self ,['vision'] )
super().__init__(*_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : "Image" ,_UpperCAmelCase : str ):
return self.pre_processor(text=[label] ,images=[image] ,padding=_UpperCAmelCase ,return_tensors='pt' )
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Optional[Any] ):
with torch.no_grad():
_a : int = self.model(**_UpperCAmelCase ).logits
return logits
def __lowercase ( self : List[Any] ,_UpperCAmelCase : Optional[int] ):
_a : Dict = outputs.cpu().detach().numpy()
_a : List[str] = 0
_a : int = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
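# Added sketch (hedged): the same segmentation without the tool wrapper,
# talking to the CLIPSeg checkpoint named above directly. CLIPSegProcessor is
# the standard companion class; the post-processing mirrors the decode step
# above (threshold at zero, scale to 255, wrap as a PIL image).
def _clipseg_sketch(image, label="cat"):
    from transformers import CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
    inputs = processor(text=[label], images=[image], padding=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    mask = np.squeeze(logits.cpu().numpy()) > 0
    return Image.fromarray((mask * 255).astype(np.uint8))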
| 89
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = LayoutLMTokenizer
lowerCAmelCase : Tuple = LayoutLMTokenizerFast
lowerCAmelCase : List[Any] = True
lowerCAmelCase : int = True
def __lowercase ( self : Dict ):
super().setUp()
_a : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Dict ,**_UpperCAmelCase : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ):
_a : Optional[int] = 'UNwant\u00E9d,running'
_a : List[Any] = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Optional[int] ):
_a : Optional[Any] = self.tokenizer_class(self.vocab_file )
_a : Optional[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : Optional[int] ):
pass
| 89
| 1
|
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ ( a_ ):
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case__ , """embed_dim""" ) )
self.parent.assertTrue(hasattr(snake_case__ , """num_heads""" ) )
class UpperCamelCase_ :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=64 , snake_case__=3 , snake_case__=[16, 48, 96] , snake_case__=[1, 3, 6] , snake_case__=[1, 2, 10] , snake_case__=[7, 3, 3] , snake_case__=[4, 2, 2] , snake_case__=[2, 1, 1] , snake_case__=[2, 2, 2] , snake_case__=[False, False, True] , snake_case__=[0.0, 0.0, 0.0] , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=True , snake_case__=2 , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_sizes
UpperCAmelCase = patch_stride
UpperCAmelCase = patch_padding
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = num_labels
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = num_heads
UpperCAmelCase = stride_kv
UpperCAmelCase = depth
UpperCAmelCase = cls_token
UpperCAmelCase = attention_drop_rate
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Any:
"""simple docstring"""
UpperCAmelCase = CvtModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ )
UpperCAmelCase = (self.image_size, self.image_size)
UpperCAmelCase , UpperCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = CvtForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( a_ , a_ , unittest.TestCase ):
_A : Optional[int] = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_A : Dict = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
_A : int = False
_A : Dict = False
_A : Optional[int] = False
_A : List[str] = False
_A : str = False
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = CvtModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return
@unittest.skip(reason="""Cvt does not output attentions""" )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(snake_case__ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
UpperCAmelCase = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = CvtModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=snake_case__ , return_tensors="""pt""" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**snake_case__ )
# verify the logits
UpperCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , snake_case__ )
UpperCAmelCase = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
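# Added worked example (grounded in the floor(...) expression used above): the
# spatial size after each conv-embedding stage is
#     floor((size + 2 * padding - kernel) / stride) + 1
# With the tester defaults (64x64 input; kernel/stride/padding of (7, 4, 2),
# (3, 2, 1), (3, 2, 1)) the feature map shrinks 64 -> 16 -> 8 -> 4, matching
# the image_size // 4 check on the first hidden state.
def _cvt_stage_sizes(size=64, kernels=(7, 3, 3), strides=(4, 2, 2), paddings=(2, 1, 1)):
    sizes = []
    for k, s, p in zip(kernels, strides, paddings):
        size = floor((size + 2 * p - k) / s) + 1
        sizes.append(size)
    return sizes


assert _cvt_stage_sizes() == [16, 8, 4]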
| 248
|
"""simple docstring"""
def _lowerCAmelCase ( ):
'''simple docstring'''
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
    print(F'{_lowerCAmelCase() = }')
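# Added worked check (grounded): the unique triplet is (a, b, c) = (200, 375, 425),
# since 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 40000 + 140625 = 180625 = 425**2,
# so the requested product is 200 * 375 * 425 = 31875000.
assert _lowerCAmelCase() == 31875000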
| 248
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Tuple = logging.get_logger(__name__)
snake_case : Dict = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class _snake_case ( PretrainedConfig ):
SCREAMING_SNAKE_CASE__ = 'transfo-xl'
SCREAMING_SNAKE_CASE__ = ['mems']
SCREAMING_SNAKE_CASE__ = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=26_7735 , _lowerCamelCase=[2_0000, 4_0000, 20_0000] , _lowerCamelCase=1024 , _lowerCamelCase=1024 , _lowerCamelCase=16 , _lowerCamelCase=64 , _lowerCamelCase=4096 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=18 , _lowerCamelCase=1600 , _lowerCamelCase=1000 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=-1 , _lowerCamelCase=True , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="normal" , _lowerCamelCase=0.01 , _lowerCamelCase=0.01 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=0 , **_lowerCamelCase , ):
a :List[str] = vocab_size
a :Union[str, Any] = []
self.cutoffs.extend(_lowerCamelCase )
if proj_share_all_but_first:
a :Optional[int] = [False] + [True] * len(self.cutoffs )
else:
a :Any = [False] + [False] * len(self.cutoffs )
a :Optional[int] = d_model
a :Union[str, Any] = d_embed
a :str = d_head
a :Optional[Any] = d_inner
a :Optional[Any] = div_val
a :int = pre_lnorm
a :Dict = n_layer
a :List[Any] = n_head
a :Any = mem_len
a :Any = same_length
a :str = attn_type
a :Optional[Any] = clamp_len
a :Optional[int] = sample_softmax
a :Optional[int] = adaptive
a :Optional[int] = dropout
a :Tuple = dropatt
a :Dict = untie_r
a :List[Any] = init
a :int = init_range
a :Optional[int] = proj_init_std
a :Optional[Any] = init_std
a :Optional[Any] = layer_norm_epsilon
super().__init__(eos_token_id=_lowerCamelCase , **_lowerCamelCase )
    @property
    def max_position_embeddings( self ):
        # Message copied from Transformer-XL documentation
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , _lowerCamelCase ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
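# Added usage note (hedged): the attribute_map above aliases the canonical HF
# names onto Transfo-XL's own, so both spellings address the same field.
# TransfoXLConfig is the released class this snippet corresponds to (moved to a
# deprecated module in recent transformers versions).
def _attribute_map_sketch():
    from transformers import TransfoXLConfig

    cfg = TransfoXLConfig(d_model=512, n_head=8)
    assert cfg.hidden_size == 512  # resolved through attribute_map to d_model
    assert cfg.num_attention_heads == 8  # resolved through attribute_map to n_head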
| 94
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase : Any = name.split('.' )
__lowerCAmelCase : List[Any] = int(items[0] )
__lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCAmelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCAmelCase : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
# load the pre-trained checkpoints
__lowerCAmelCase : Any = torch.load(_UpperCamelCase )
__lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCAmelCase : List[str] = WavLMConfig()
__lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
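# Added sketch (hedged): feature extraction with the converted model. The dump
# directory is whatever --pytorch_dump_folder_path pointed at (hypothetical
# here). WavLM consumes raw 16 kHz waveforms, so a random tensor stands in for
# real audio; one second yields roughly 49 output frames.
def _run_converted_wavlm(dump_dir="wavlm_dump"):
    model = WavLMModel.from_pretrained(dump_dir).eval()
    waveform = torch.randn(1, 16000)  # fake one-second, 16 kHz mono clip
    with torch.no_grad():
        hidden_states = model(waveform).last_hidden_state
    return hidden_states.shape  # (1, ~49, config.hidden_size)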
| 86
| 0
|
NUMBERS_PLUS_LETTER = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'
def __snake_case ( spanish_id: str ):
    """simple docstring"""
    if not isinstance(spanish_id ,str ):
        raise TypeError(f'''Expected string as input, found {type(spanish_id ).__name__}''' )
    spanish_id_clean = spanish_id.replace("-" ,"" ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
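# Added worked example (grounded in the lookup table above): for "12345678Z",
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so the ID validates;
# changing the check letter makes it fail.
assert __snake_case("12345678Z") is True
assert __snake_case("12345678A") is False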
| 360
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int
_lowerCamelCase : str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'bar'
A_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A_ = gen_random_output()
with temp_seed(42 ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
return text.split()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __snake_case ( ):
"""simple docstring"""
with Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A_ = []
for yield_time, content in iflatmap_unordered(
__UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
out.append(__UpperCamelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__UpperCamelCase ) == 4
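# Added sketch (hedged): map_nested applies a function to every leaf of an
# arbitrarily nested dict/list structure, which is what the big case-by-case
# equality test above verifies.
def _map_nested_sketch():
    structure = {"a": [1, 2], "b": {"1": 3}}
    assert map_nested(lambda x: x + 1, structure) == {"a": [2, 3], "b": {"1": 4}}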
| 329
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class a__ :
lowerCamelCase : Tuple =LEDConfig
lowerCamelCase : Tuple ={}
lowerCamelCase : int ="gelu"
def __init__( self : Union[str, Any] , a : str , a : str=13 , a : Tuple=7 , a : Any=True , a : str=False , a : str=99 , a : str=32 , a : str=2 , a : int=4 , a : Dict=37 , a : Any=0.1 , a : int=0.1 , a : List[Any]=20 , a : Optional[Any]=2 , a : int=1 , a : Union[str, Any]=0 , a : List[Any]=4 , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__lowerCamelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__lowerCamelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
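
        # Worked example with the defaults above: seq_length=7 and attention_window=4
        # give encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 7 + 1 = 8, i.e. the encoder
        # input is padded up to the next multiple of the attention window.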
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
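
# Illustration of the default masks built above (toy values, for intuition only):
# with pad_token_id=1 and input_ids [[0, 4, 7, 1, 1]], `attention_mask` comes out
# as [[1, 1, 1, 0, 0]]; the decoder mask additionally forces a 1 on the first
# position so the decoder can always attend to its start token.
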
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # Head-masking is not yet implemented for LED
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A : str = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
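
# The round trip exercised above, in short: `push_to_hub(repo_id, use_auth_token=...)`
# uploads the feature extractor's config to the Hub, and `from_pretrained("<user>/<repo>")`
# rebuilds an equivalent object, which is why the field-by-field `__dict__` comparison
# is expected to hold.
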
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
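
    # For example, get_scheduler_config(solver_order=3) returns the dict above with
    # "solver_order" overridden to 3; the tests below tweak the base config this way.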
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # build a default scheduler only when none is supplied, so callers
        # (e.g. test_switch below) can inject their own instance
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, splitting the train set
    between the given fold indices and using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
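
# For intuition (toy illustration, not part of the script): StratifiedKFold yields
# (train_idxs, valid_idxs) pairs of index arrays whose class proportions match `y`:
#
#     for train_idxs, valid_idxs in StratifiedKFold(n_splits=3).split(np.zeros(len(y)), y):
#         ...  # each pair defines one fold
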
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
# Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
# Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    # Use accelerator.print to print only on the main process.
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a Swin Transformer V2 model.
    """

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
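
# Worked example of the derived attribute above: with the default embed_dim=96 and
# depths=[2, 2, 6, 2], hidden_size = int(96 * 2 ** 3) = 768, e.g.
#
#     config = Swinv2Config()
#     assert config.hidden_size == 768
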
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowercase : Dict ="3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in `_import_structure`
    and the objects defined in the TYPE_CHECKING block.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1,
    # the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class __a :
def __init__( self ) -> Tuple:
'''simple docstring'''
lowercase__: Dict = {}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 ) -> List[Any]:
'''simple docstring'''
# check if u exists
if self.graph.get(lowerCAmelCase__ ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowercase__: str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
lowercase__: Union[str, Any] = [[w, u]]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> List[str]:
'''simple docstring'''
if s == d:
return []
lowercase__: str = []
lowercase__: int = []
if s == -2:
lowercase__: Tuple = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: int = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
lowercase__: Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
lowercase__: Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has at most 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[Any]:
'''simple docstring'''
lowercase__: str = deque()
lowercase__: List[Any] = []
if s == -2:
lowercase__: str = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
lowercase__: Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: str = []
lowercase__: Dict = []
lowercase__: Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = -2
lowercase__: Dict = []
lowercase__: List[Any] = s
lowercase__: Union[str, Any] = False
lowercase__: List[str] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: str = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Dict = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: int = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Tuple = s
lowercase__: List[Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: Tuple = []
lowercase__: Optional[int] = []
lowercase__: Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Tuple = -2
lowercase__: Any = []
lowercase__: int = s
lowercase__: Optional[int] = False
lowercase__: List[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Union[str, Any] = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: List[str] = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Dict = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Optional[Any] = s
lowercase__: Optional[int] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return list(self.graph )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Dict = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: List[Any] = time()
return end - begin
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[Any]:
'''simple docstring'''
lowercase__: str = time()
self.bfs(lowerCAmelCase__ )
lowercase__: List[str] = time()
return end - begin
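# A minimal usage sketch of the weighted-graph class above (hedged: the mangled
# snippet collapses every method to one identifier, so the original names
# add_pair / dfs / bfs / has_cycle are assumed here):
#
#     g = Graph()
#     g.add_pair(0, 1, 1)
#     g.add_pair(1, 2, 1)
#     g.add_pair(2, 0, 1)
#     g.dfs(0)         # e.g. [0, 1, 2]
#     g.bfs(0)         # e.g. [0, 1, 2]
#     g.has_cycle()    # True for the 0 -> 1 -> 2 -> 0 triangle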
| 288
| 0
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCamelCase_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCamelCase_ = {'''facebook/blenderbot_small-90M''': 5_12}
def __magic_name__ ( __a : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ = set()
UpperCamelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase__ = char
UpperCamelCase__ = set(__a )
return pairs
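# Worked example for get_pairs above: for the symbol tuple ("h", "e", "l", "l", "o")
# it returns the set of adjacent symbol pairs
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, i.e. the candidate BPE merges.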
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = ["""input_ids""", """attention_mask"""]
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="__start__" , SCREAMING_SNAKE_CASE_="__end__" , SCREAMING_SNAKE_CASE_="__unk__" , SCREAMING_SNAKE_CASE_="__null__" , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase__ = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase__ = [tuple(merge.split() ) for merge in merges]
UpperCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase__ = {}
@property
def UpperCAmelCase_ (self ):
return len(self.encoder )
def UpperCAmelCase_ (self ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ = re.sub("""([.,!?()])""" , r""" \1""" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = re.sub("""(')""" , r""" \1 """ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = re.sub(r"""\s{2,}""" , """ """ , SCREAMING_SNAKE_CASE_ )
if "\n" in token:
UpperCamelCase__ = token.replace("""\n""" , """ __newln__""" )
UpperCamelCase__ = token.split(""" """ )
UpperCamelCase__ = []
for token in tokens:
if not len(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase__ = token.lower()
UpperCamelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
UpperCamelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
words.append(SCREAMING_SNAKE_CASE_ )
continue
while True:
UpperCamelCase__ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase__ , UpperCamelCase__ = bigram
UpperCamelCase__ = []
UpperCamelCase__ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
UpperCamelCase__ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
new_word.extend(word[i:j] )
UpperCamelCase__ = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
UpperCamelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """@@ """.join(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = word[:-4]
UpperCamelCase__ = word
words.append(SCREAMING_SNAKE_CASE_ )
return " ".join(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
UpperCamelCase__ = re.findall(r"""\S+\n?""" , SCREAMING_SNAKE_CASE_ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(""" """ ) ) )
return split_tokens
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = token.lower()
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = """ """.join(SCREAMING_SNAKE_CASE_ ).replace("""@@ """ , """""" ).strip()
return out_string
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + """\n""" )
UpperCamelCase__ = 0
with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : SCREAMING_SNAKE_CASE_[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase__ = token_index
writer.write(""" """.join(SCREAMING_SNAKE_CASE_ ) + """\n""" )
index += 1
return vocab_file, merge_file
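# A hedged usage sketch (not part of this snippet): this mangled class corresponds
# to BlenderbotSmallTokenizer, so round-tripping text would look like:
#
#     tok = BlenderbotSmallTokenizer("vocab.json", "merges.txt")
#     ids = tok("sample text")["input_ids"]
#     text = tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids))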
| 244
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowerCamelCase_ = data_utils.TransfoXLTokenizer
lowerCamelCase_ = data_utils.TransfoXLCorpus
lowerCamelCase_ = data_utils
lowerCamelCase_ = data_utils
def __magic_name__ ( __a : List[Any] , __a : str , __a : Optional[Any] , __a : List[str] ):
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__a , """rb""" ) as fp:
UpperCamelCase__ = pickle.load(__a , encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase__ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(f"Save vocabulary to {pytorch_vocab_dump_path}" )
UpperCamelCase__ = corpus.vocab.__dict__
torch.save(__a , __a )
UpperCamelCase__ = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""" , __a )
UpperCamelCase__ = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(f"Save dataset to {pytorch_dataset_dump_path}" )
torch.save(__a , __a )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase__ = os.path.abspath(__a )
UpperCamelCase__ = os.path.abspath(__a )
print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase__ = TransfoXLConfig()
else:
UpperCamelCase__ = TransfoXLConfig.from_json_file(__a )
print(f"Building PyTorch model from configuration: {config}" )
UpperCamelCase__ = TransfoXLLMHeadModel(__a )
UpperCamelCase__ = load_tf_weights_in_transfo_xl(__a , __a , __a )
# Save pytorch-model
UpperCamelCase__ = os.path.join(__a , __a )
UpperCamelCase__ = os.path.join(__a , __a )
print(f"Save PyTorch model to {os.path.abspath(__a )}" )
torch.save(model.state_dict() , __a )
print(f"Save configuration file to {os.path.abspath(__a )}" )
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
lowerCamelCase_ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
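# Example invocations (hedged: script name and paths are placeholders; the flags
# are the ones defined by the argparse parser above):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl --transfo_xl_dataset_file ./corpus.pkl
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl --tf_checkpoint_path ./model.ckpt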
| 244
| 1
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : str = 0
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Optional[int] = Path(a_ ) / """preprocessor_config.json"""
_UpperCAmelCase : int = Path(a_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(a_ ,"""w""" ) )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> Union[str, Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : List[str] = Path(a_ ) / """preprocessor_config.json"""
_UpperCAmelCase : List[Any] = Path(a_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(a_ ,"""w""" ) )
_UpperCAmelCase : int = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Any = CLIPConfig()
# Create a dummy config file with image_processor_type
_UpperCAmelCase : str = Path(a_ ) / """preprocessor_config.json"""
_UpperCAmelCase : Optional[int] = Path(a_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(a_ ,"""w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained(a_ ).to_dict()
config_dict.pop("""image_processor_type""" )
_UpperCAmelCase : List[str] = CLIPImageProcessor(**a_ )
# save in new folder
model_config.save_pretrained(a_ )
config.save_pretrained(a_ )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(a_ )
# make sure private variable is not incorrectly saved
_UpperCAmelCase : Dict = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : str = Path(a_ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> List[Any]:
with self.assertRaisesRegex(
a_ ,"""clip-base is not a local folder and is not a valid model identifier""" ):
_UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("""clip-base""" )
def _snake_case ( self ) -> List[str]:
with self.assertRaisesRegex(
a_ ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained(a_ ,revision="""aaaaaa""" )
def _snake_case ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
a_ ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,):
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _snake_case ( self ) -> Optional[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a_ ):
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a_ ):
_UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=a_ )
_UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a_ )
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(a_ ,trust_remote_code=a_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ ,"""NewImageProcessor""" )
def _snake_case ( self ) -> Any:
try:
AutoConfig.register("""custom""" ,a_ )
AutoImageProcessor.register(a_ ,a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
AutoImageProcessor.register(a_ ,a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Optional[int] = Path(a_ ) / """preprocessor_config.json"""
_UpperCAmelCase : Dict = Path(a_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(a_ ,"""w""" ) )
_UpperCAmelCase : Union[str, Any] = CustomImageProcessor.from_pretrained(a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a_ )
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ ,a_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> str:
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = True
try:
AutoConfig.register("""custom""" ,a_ )
AutoImageProcessor.register(a_ ,a_ )
# If remote code is not set, the default is to use local
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(not hasattr(a_ ,"""is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 355
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Union[str, Any] = (32, 32)
_UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(a_ )
return image
@property
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def _snake_case ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _snake_case ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModel(a_ )
@property
def _snake_case ( self ) -> Union[str, Any]:
def extract(*a_ ,**a_ ):
class lowercase :
"""simple docstring"""
def __init__( self ) -> Any:
_UpperCAmelCase : str = torch.ones([0] )
def _snake_case ( self ,a_ ) -> Any:
self.pixel_values.to(a_ )
return self
return Out()
return extract
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> int:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
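# Hedged sketch of the safe-latent-diffusion call pattern these tests exercise
# (the sld_* kwargs are taken from the calls above; model id and prompt are
# placeholders; sld_guidance_scale=0 disables safety guidance entirely):
#
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     image = pipe(
#         "a prompt", sld_guidance_scale=2_000, sld_warmup_steps=7,
#         sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
#     ).images[0]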
| 349
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
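# With the _LazyModule pattern above, importing the package stays cheap:
# `from transformers.models.blenderbot_small import BlenderbotSmallConfig` loads
# only the configuration module, while the torch/TF/Flax model classes listed in
# _import_structure are imported lazily on first attribute access.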
| 59
|
from collections.abc import Callable
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ = None ):
# Stores actual heap items.
lowercase : list = []
# Stores indexes of each item for supporting updates and deletion.
lowercase : dict = {}
# Stores current size of heap.
lowercase : str = 0
# Stores the function used to compute an item's score, on which the heap
# ordering is based.
lowercase : Tuple = key or (lambda SCREAMING_SNAKE_CASE__ : SCREAMING_SNAKE_CASE__)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
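# First update the positions of the two items in the index map.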
lowercase , lowercase : Dict = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
lowercase , lowercase : int = self.arr[j], self.arr[i]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : int = self._left(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = i
if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = left
if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = right
return valid_parent
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ )
while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
while valid_parent != index:
self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : str = self.pos_map[item]
lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if item not in self.pos_map:
return
lowercase : List[str] = self.pos_map[item]
del self.pos_map[item]
lowercase : Optional[int] = self.arr[self.size - 1]
lowercase : int = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(SCREAMING_SNAKE_CASE__ )
self._heapify_down(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] )
else:
lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )]
lowercase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
lowercase : str = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def __lowercase ( ) ->None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
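# A hedged usage sketch (the mangled class collapses every method to one name;
# the original API insert_item / update_item / delete_item / get_top /
# extract_top is assumed, with `key` turning the min-heap into a max-heap):
#
#     h = Heap(key=lambda x: -x)       # max-heap via negated scores
#     for v in (3, 1, 2):
#         h.insert_item(v, v)
#     h.get_top()                      # [3, -3]: best (smallest) score on top
#     h.update_item(1, 10)             # re-score item 1; it bubbles to the top
#     h.extract_top()                  # [1, -10]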
| 337
| 0
|
"""simple docstring"""
from math import factorial
def a__ ( snake_case__ = 20 ) -> int:
lowerCamelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowerCamelCase = n // 2
return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) )
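# Worked example: solution(4) == 70, i.e. the central binomial coefficient
# C(8, 4) = 8! / (4! * 4!) = 70.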
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
lowerCAmelCase : Optional[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 168
|
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = False, False, False
@dataclass
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = None
# Automatically constructed
__UpperCamelCase = "dict"
__UpperCamelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__UpperCamelCase = field(default="Audio" , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(_a , _a ):
return {"bytes": None, "path": value}
elif isinstance(_a , _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowerCamelCase = BytesIO()
sf.write(_a , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# To convert raw PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already have the PCM bytes, use them directly instead of reading the file again
lowerCamelCase = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
lowerCamelCase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
lowerCamelCase = BytesIO(bytes() )
sf.write(_a , _a , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowerCamelCase , lowerCamelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowerCamelCase = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowerCamelCase = token_per_repo_id or {}
lowerCamelCase = path.split("""::""" )[-1]
try:
lowerCamelCase = string_to_dict(_a , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCamelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCamelCase = None
with xopen(_a , """rb""" , use_auth_token=_a ) as f:
lowerCamelCase , lowerCamelCase = sf.read(_a )
else:
lowerCamelCase , lowerCamelCase = sf.read(_a )
lowerCamelCase = array.T
if self.mono:
lowerCamelCase = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCamelCase = librosa.resample(_a , orig_sr=_a , target_sr=self.sampling_rate )
lowerCamelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowerCAmelCase ( self ):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.binary() )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.string() )
lowerCamelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowerCamelCase = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCamelCase = storage.field("""bytes""" )
else:
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCamelCase = storage.field("""path""" )
else:
lowerCamelCase = pa.array([None] * len(_a ) , type=pa.string() )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(_a , self.pa_type )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a , """rb""" ) as f:
lowerCamelCase = f.read()
return bytes_
lowerCamelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCamelCase = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
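# A hedged usage sketch of this feature through the public `datasets` API
# (file name is a placeholder): casting a column to Audio makes decoding lazy.
#
#     from datasets import Audio, Dataset
#     ds = Dataset.from_dict({"audio": ["clip.wav"]})
#     ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#     sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}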
| 168
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=400 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 / 255 , SCREAMING_SNAKE_CASE_=True , ) -> List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCamelCase :List[str] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase :List[str] = parent
UpperCamelCase :Dict = batch_size
UpperCamelCase :Optional[Any] = num_channels
UpperCamelCase :str = min_resolution
UpperCamelCase :Any = max_resolution
UpperCamelCase :List[Any] = do_resize
UpperCamelCase :Optional[int] = size
UpperCamelCase :List[str] = do_normalize
UpperCamelCase :Any = image_mean
UpperCamelCase :Union[str, Any] = image_std
UpperCamelCase :int = do_rescale
UpperCamelCase :str = rescale_factor
UpperCamelCase :str = do_pad
def UpperCAmelCase ( self ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> List[str]:
if not batched:
UpperCamelCase :List[Any] = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE_ , Image.Image ):
UpperCamelCase , UpperCamelCase :str = image.size
else:
UpperCamelCase , UpperCamelCase :int = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase :List[str] = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase :Optional[int] = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase :Dict = self.size['''shortest_edge''']
UpperCamelCase :List[Any] = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase :Union[str, Any] = self.size['''shortest_edge''']
UpperCamelCase :Optional[int] = self.size['''shortest_edge''']
else:
UpperCamelCase :Any = []
for image in image_inputs:
UpperCamelCase , UpperCamelCase :List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase :List[str] = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : SCREAMING_SNAKE_CASE_[0] )[0]
UpperCamelCase :Optional[int] = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : SCREAMING_SNAKE_CASE_[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase_ ( lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] =YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Optional[Any] = YolosImageProcessingTester(self )
@property
def UpperCAmelCase ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''size''' ) )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
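    # For reference (illustrative), the COCO detection target passed above has the form
    # {"image_id": int, "annotations": [{"bbox": [x, y, width, height],
    #  "category_id": int, "area": float, "iscrowd": 0 or 1, ...}, ...]}; the image
    # processor converts each box to normalized (cx, cy, w, h), which is what the
    # expected_boxes_slice assertion checks.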
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
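
# Illustrative: each line of the entity_vocab file is "<title>\t<id>", e.g.
#     [PAD]\t0
#     [UNK]\t1
#     Ana Ivanovic\t2
# The loader above keeps only the title column and re-derives the id from the
# line number, so the ids must be contiguous and start at 0.
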
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
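
# Illustrative usage of this (deprecated) dataset wrapper; the task name and
# data directory below are placeholders:
#
#     args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/MRPC", max_seq_length=128)
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
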
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum contiguous subarray sum (Kadane's algorithm).

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
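    # Illustrative: with allow_empty_subarrays=True an all-negative input yields 0
    # (the empty subarray) rather than the largest single element.
    print(f"{max_subarray_sum([-5, -3, -9], allow_empty_subarrays=True) = }")  # 0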
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order: 2, 3, 5, 7, ..."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f'''{solution() = }''')
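    # Sanity check (illustrative): solution(6) should give 13, the sixth prime.
    print(f"{solution(6) = }")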
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [
                t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))
            ]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_stable_diffusion_flax(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
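    # Note on the sharding pattern above (illustrative): `replicate(params)` copies
    # the weights to every local device, `shard(prompt_ids)` reshapes the leading
    # batch axis from (num_devices * n, ...) to (num_devices, n, ...), and
    # `jit=True` routes the pipeline through its pmap-ed path, one slice per device.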
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
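
# Shape walk-through (illustrative): for checkpoint_version >= 2.0 with
# num_heads=2, num_splits=3, hidden_size=4, a (2*3*4, D) = (24, D) QKV tensor is
# viewed as (2, 3, 4, D), transposed to (3, 2, 4, D), then flattened back to
# (24, D) -- i.e. reordered from [heads, splits, ...] to [splits, heads, ...].
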
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)"
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model."
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
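
# Example mapping (illustrative trace of the rules above):
#   rename_key("pretrained.model.blocks.0.attn.proj.weight")
#   -> "dpt.encoder.layer.0.attention.output.dense.weight"
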
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
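# Minimal usage sketch (assumes the class is exposed as in transformers; the override
# values are illustrative only):
#
#     config = ViTMAEConfig(mask_ratio=0.9, norm_pix_loss=True)
#     assert config.hidden_size == 768 and config.decoder_hidden_size == 512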
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order so the smallest value ends up at the head
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
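# Doctest-style sketch of the merge (values chosen for illustration):
#
#     >>> SSL = SortedLinkedList
#     >>> str(merge_lists(SSL((3, 1)), SSL((2, 0))))
#     '0 -> 1 -> 2 -> 3'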
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
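# Quick sketch of the two standalone helpers above (shapes are illustrative):
#
#     betas = betas_for_alpha_bar(1000)                 # (1000,) cosine-schedule betas
#     x = jnp.ones((4,))
#     broadcast_to_shape_from_left(x, (4, 3, 2)).shape  # (4, 3, 2)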
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # The order within a batch is not guaranteed, so only the label types are checked.
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
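# For reference, the pipeline exercised above can be used directly (the checkpoint is
# the same public CLIP model as in the slow tests; the image path is a placeholder):
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cats.png", candidate_labels=["cat", "plane", "remote"])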
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # computes the expected height and width after the shortest-edge resize
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
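# Standalone sketch of the preprocessing verified above (same COCO fixture as the
# slow tests):
#
#     processor = DeformableDetrImageProcessor()
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     encoding = processor(images=image, return_tensors="pt")
#     encoding["pixel_values"].shape  # torch.Size([1, 3, 800, 1066])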
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    # Current minimum recommendation is a 2048-bit group (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check that the other party's public key is in range and has the right order
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
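# Doctest-style sketch of a full exchange with the default 2048-bit group: both sides
# derive the same shared secret.
#
#     >>> alice, bob = DiffieHellman(), DiffieHellman()
#     >>> s1 = alice.generate_shared_key(bob.generate_public_key())
#     >>> s2 = bob.generate_shared_key(alice.generate_public_key())
#     >>> s1 == s2
#     True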
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
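# Minimal usage sketch (facebook/blenderbot-3B is the checkpoint referenced above;
# loading it requires network access):
#
#     tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     tok(" Hello world").input_ids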
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer to avoid a circular import
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
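# Minimal usage sketch (facebook/rag-token-nq is a public RAG checkpoint; loading it
# requires network access):
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle?", return_tensors="pt")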
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
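# A couple of reference values (doctest-style):
#
#     >>> jaro_winkler("martha", "marhta")
#     0.9611111111111111
#     >>> jaro_winkler("hello", "hello")
#     1.0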
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
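# Example invocation (flags as defined above; the paths are placeholders):
#
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path ./model.pkl \
#         --config_file ./config.json \
#         --pytorch_dump_path ./pytorch_model.bin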
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
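# Sketch: enabling RoPE scaling; the validator above accepts exactly the fields
# "type" (one of "linear"/"dynamic") and "factor" (a float > 1):
#
#     config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})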
| 167
| 1
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating it to max_length."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
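# Quick illustration of trim_batch: columns that are padding in *every* row are dropped.
#   ids = torch.tensor([[5, 6, 0], [7, 0, 0]])
#   trim_batch(ids, pad_token_id=0)  # -> tensor([[5, 6], [7, 0]])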
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
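# Sanity check of the metric helpers above (values verified by hand):
#   normalize_answer("The Cat sat.")               # -> "cat sat"
#   f1_score("the cat sat", "a cat sat")           # -> 1.0 (articles and punctuation are stripped)
#   calculate_exact_match(["a cat"], ["the cat"])  # -> {"em": 1.0}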
| 150
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
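# Note on the pattern above: with the lazy structure in place, importing the
# subpackage is cheap; the heavy torch/TF/flax modeling modules are only
# imported on first attribute access via _LazyModule.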
| 52
| 0
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 370
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polynomial():
    """Visualize the polynomial regression fit against the raw data points."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 234
| 0
|
"""simple docstring"""
_a : Dict= "Tobias Carryer"
from time import time
class UpperCamelCase :
def __init__(self : Dict , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : int=int(time())) -> List[str]: # noqa: B008
__snake_case : List[Any] = multiplier
__snake_case : Dict = increment
__snake_case : Any = modulo
__snake_case : Union[str, Any] = seed
def _lowercase (self : Dict) -> Optional[Any]:
__snake_case : int = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
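    # Note: multiplier 1664525 and increment 1013904223 are the well-known
    # "Numerical Recipes" LCG constants, used here with modulus 2**32 (2 << 31).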
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 172
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
_CITATION = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 140
| 0
|
def combination_util(arr, n, r, index, data, i):
    """Recursively build and print all combinations of size r from arr[0..n-1]."""
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
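# For reference, the standard library produces the same combinations (as tuples)
# in one call, which is handy for verifying the output above:
#   import itertools
#   list(itertools.combinations([10, 20, 30, 40, 50], 3))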
| 29
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 29
| 1
|
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift 'number' left by 'shift_amount' bits and return the binary string."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift 'number' right by 'shift_amount' bits, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Arithmetic right shift: the sign bit of 'number' is replicated."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
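# Illustrative values for the three shifts above (verified by hand):
#   logical_left_shift(17, 2)        # '0b1000100'
#   logical_right_shift(1983, 4)     # '0b1111011'
#   arithmetic_right_shift(-17, 2)   # '0b111011'  (sign bit replicated)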
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only steps when the wrapped
    optimizer(s) actually stepped (e.g. not skipped due to gradient
    accumulation or a mixed-precision overflow)."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough methods to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
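# Minimal usage sketch (the optimizer/scheduler names are illustrative): the
# wrapper keeps LR schedules aligned with gradient accumulation and skipped
# (inf/NaN) steps, because it only advances when the optimizer truly stepped.
#
#   scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
#   scheduler.step()  # no-op (apart from bookkeeping) if the optimizer step was skipped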
| 325
| 1
|
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether digit n can be placed at grid[row][column]."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the first empty cell (value 0), scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by depth-first backtracking."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
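# The solver above is plain depth-first backtracking: worst case O(9^m) for m
# empty cells, though is_safe() prunes almost all branches on typical grids.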
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 204
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 204
| 1
|
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit.

    >>> remove_digit(152)
    52
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('doctest').testmod()
| 121
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
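# e.g. downscale_height_and_width(768, 768) -> (96, 96): the pixel size is mapped
# to the latent size by dividing by scale_factor**2 (rounding up) and multiplying
# back by scale_factor.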
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for Kandinsky 2.2."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload the submodels to CPU, moving each to GPU only for its forward pass."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU with hooks, keeping one model on GPU at a time."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Return the execution device of the unet's hooks if offloaded, else the pipeline device."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 121
| 1
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 364
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 197
| 0
|
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(
lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , )
def A_ ( self ):
_lowerCamelCase : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
_lowerCamelCase : Union[str, Any] = 'This is a simple input'
_lowerCamelCase : str = ['This is a simple input looooooooong', 'This is a simple input']
_lowerCamelCase : Optional[Any] = ('This is a simple input', 'This is a pair')
_lowerCamelCase : Any = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
_lowerCamelCase : Tuple = tokenizer.pad_token_id
_lowerCamelCase : Optional[Any] = tokenizer(lowercase , padding='max_length' , max_length=30 , return_tensors='np' )
_lowerCamelCase : List[Any] = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np' )
_lowerCamelCase : str = tokenizer(*lowercase , padding='max_length' , max_length=60 , return_tensors='np' )
_lowerCamelCase : str = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def A_ ( self ):
_lowerCamelCase : Optional[int] = '$$$'
_lowerCamelCase : Union[str, Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase , add_bos_token=lowercase )
_lowerCamelCase : Union[str, Any] = 'This is a simple input'
_lowerCamelCase : Dict = ['This is a simple input 1', 'This is a simple input 2']
_lowerCamelCase : List[str] = tokenizer.bos_token_id
_lowerCamelCase : List[Any] = tokenizer(lowercase )
_lowerCamelCase : int = tokenizer(lowercase )
self.assertEqual(out_s.input_ids[0] , lowercase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : Any = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowercase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def A_ ( self ):
_lowerCamelCase : Optional[Any] = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
_lowerCamelCase : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
_lowerCamelCase : str = '\nif len_a > len_b: result = a\nelse: result = b'
_lowerCamelCase : Optional[Any] = tokenizer.encode(lowercase )
_lowerCamelCase : str = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
_lowerCamelCase : str = tokenizer.decode(lowercase , truncate_before_pattern=lowercase )
self.assertEqual(lowercase , lowercase )
def A_ ( self ):
pass
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder( CLIPPreTrainedModel ):
    '''simple docstring'''
    def __init__( self , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
    def forward( self , pixel_values , return_uncond_vector=False ):
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper( nn.Module ):
    '''simple docstring'''
    def __init__( self , config ):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='gelu' , attention_bias=True )
                for _ in range(num_layers )
            ] )
    def forward( self , hidden_states ):
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
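# --- Added usage sketch (not part of the original file) ---
# Minimal smoke test for the encoder above. The default CLIPVisionConfig and the
# random pixel values are illustrative assumptions, not taken from the source.
if __name__ == "__main__":
    from transformers import CLIPVisionConfig

    vision_config = CLIPVisionConfig()
    encoder = PaintByExampleImageEncoder(vision_config )
    pixel_values = torch.randn(1 , 3 , vision_config.image_size , vision_config.image_size )
    latents, uncond = encoder(pixel_values , return_uncond_vector=True )
    print(latents.shape , uncond.shape )  # both torch.Size([1, 1, 768])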
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase_ = get_tests_dir("fixtures")
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 500
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowercase_ ) as mock_head:
snake_case = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCamelCase ( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
snake_case = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class lowerCamelCase ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls ) -> Union[str, Any]:
snake_case = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def _lowerCamelCase ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token, repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def _lowerCamelCase ( self ) -> Dict:
snake_case = WavaVecaFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token )
snake_case = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_, repo_id='test-feature-extractor', push_to_hub=lowercase_, use_auth_token=self._token )
snake_case = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) )
def _lowerCamelCase ( self ) -> Tuple:
snake_case = WavaVecaFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token )
snake_case = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_, repo_id='valid_org/test-feature-extractor-org', push_to_hub=lowercase_, use_auth_token=self._token )
snake_case = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_, getattr(lowercase_, lowercase_ ) )
def _lowerCamelCase ( self ) -> Any:
CustomFeatureExtractor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map, {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'}, )
snake_case = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''', trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor' )
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
"emoji": True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'''{line["duration"]:.4f}'''
                if line.get("outcome", "") == "failed":
                    section_num_failed += 1
                    failed.append([test, duration, log.name.split("_")[0]])
                    total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
    message = "No failed tests! 🤗"
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
        action_button = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
        date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[str] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from copy import deepcopy
class FenwickTree :
    def __init__(self , arr : list[int] | None = None , size : int | None = None ) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError("""Either arr or size must be specified""" )
    def init (self , arr : list[int] ) -> None:
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array (self ) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_ (index : int ) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev (index : int ) -> int:
        return index - (index & (-index))
    def add (self , index : int , value : int ) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update (self , index : int , value : int ) -> None:
        self.add(index , value - self.get(index ) )
    def prefix (self , right : int ) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query (self , left : int , right : int ) -> int:
        return self.prefix(right ) - self.prefix(left )
    def get (self , index : int ) -> int:
        return self.query(index , index + 1 )
    def rank_query (self , value : int ) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
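    # --- Added usage sketch (not part of the original file); values are illustrative.
    f = FenwickTree([1, 2, 3, 4, 5] )
    print(f.prefix(3 ) )     # 6  -> sum of the first three elements
    print(f.query(1 , 4 ) )  # 9  -> sum of elements at indices 1..3
    f.add(2 , 10 )           # point update: arr[2] += 10
    print(f.get(2 ) )        # 13
    print(f.get_array() )    # [1, 2, 13, 4, 5]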
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_snake_case = HUGGINGFACE_HUB_CACHE
_snake_case = 'config.json'
_snake_case = 'diffusion_pytorch_model.bin'
_snake_case = 'diffusion_flax_model.msgpack'
_snake_case = 'model.onnx'
_snake_case = 'diffusion_pytorch_model.safetensors'
_snake_case = 'weights.pb'
_snake_case = 'https://huggingface.co'
_snake_case = default_cache_path
_snake_case = 'diffusers_modules'
_snake_case = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
_snake_case = ['fp16', 'non-ema']
_snake_case = '.self_attn'
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = '''T5Config'''
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Tuple = """mt5"""
_lowerCAmelCase : Tuple = MTaConfig
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Optional[int] = """mt5"""
_lowerCAmelCase : Any = MTaConfig
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Union[str, Any] = """mt5"""
_lowerCAmelCase : List[str] = MTaConfig
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowercase__ : str = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping ( key , file ):
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'''.*layer_(\d*).*''' , file )[1] )
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size ( dtype ):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'''[^\d](\d+)$''' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch ( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , file_names ) )
        index_dict = {'''weight_map''': {}, '''metadata''': {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names ):
            print('''Processing file: {}'''.format(file ) )
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                tp_file = file.replace('''model_00''' , f"model_0{i}" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , tp_file ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors , os.path.join(
                    pytorch_dump_folder_path , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) ) , ) , )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = '''pytorch_model_{}-of-{}.bin'''.format(
                        str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
            json_config = json.dumps(index_dict , indent=2 , sort_keys=True ) + '''\n'''
            f.write(json_config )
    else:
        model = BloomModel(config )
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , file_names ) )
        missing_keys = None
        for i, file in enumerate(file_names ):
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                tp_file = file.replace('''model_00''' , f"model_0{i}" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , tp_file ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors , strict=False )
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys )
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path , exist_ok=True )
        pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"Save configuration file to {pytorch_config_dump_path}" )
        with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowercase__ : List[Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
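    # --- Added invocation sketch (not part of the original script) ---
    # Equivalent direct call; all paths and values below are illustrative
    # placeholders, not real checkpoints:
    # convert_bloom_checkpoint_to_pytorch(
    #     "/path/to/megatron_checkpoints", "", "/path/to/output", shard_model=True, pretraining_tp=4
    # )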
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case__ ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __a : Any , __a : int=13 , __a : Dict=7 , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : List[Any]=True , __a : Tuple=True , __a : Union[str, Any]=99 , __a : Dict=32 , __a : Dict=5 , __a : str=4 , __a : Optional[int]=37 , __a : str="gelu" , __a : Any=0.1 , __a : Optional[int]=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[int]=2 , __a : str=0.0_2 , __a : Union[str, Any]=4 , ) -> List[str]:
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : Optional[int] = batch_size
__snake_case : List[Any] = seq_length
__snake_case : int = is_training
__snake_case : int = use_attention_mask
__snake_case : List[Any] = use_token_type_ids
__snake_case : Tuple = use_labels
__snake_case : Dict = vocab_size
__snake_case : Tuple = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Union[str, Any] = type_vocab_size
__snake_case : str = type_sequence_label_size
__snake_case : Tuple = initializer_range
__snake_case : Optional[Any] = num_choices
def A_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[int] = None
if self.use_attention_mask:
__snake_case : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Optional[int] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__a , )
return config, input_ids, attention_mask
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Optional[int] = config_and_inputs
__snake_case : List[str] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[int] = FlaxDistilBertModelTester(self )
@slow
def A_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case : Optional[int] = model_class_name.from_pretrained('distilbert-base-uncased' )
__snake_case : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
@require_flax
class snake_case__ ( unittest.TestCase ):
@slow
def A_ ( self : Any ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
__snake_case : Tuple = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__snake_case : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__snake_case : str = model(__a , attention_mask=__a )[0]
__snake_case : List[str] = (1, 11, 768)
self.assertEqual(output.shape , __a )
__snake_case : Dict = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
'''simple docstring'''
def gray_code ( bit_count : int ) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('The given input must be non-negative' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string ( bit_count : int ) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '0' + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '1' + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
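    # --- Added demonstration (not part of the original file) ---
    print(gray_code_sequence_string(2 ) )  # ['00', '01', '11', '10']
    print(gray_code(2 ) )                  # [0, 1, 3, 2]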
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model (pt_model , model_file ) -> Any:
    """simple docstring"""
    try:
        with open(model_file , '''rb''' ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith('''version''' ):
                    raise OSError(
                        '''You seem to have cloned a repository without having git-lfs installed. Please'''
                        ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
                        ''' folder you cloned.''' )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model (pt_model , flax_state ) -> Optional[int]:
    """simple docstring"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state , sep='''.''' )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('''.''' )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('''_0''' , '''.0''' )
                    .replace('''_1''' , '''.1''' )
                    .replace('''_2''' , '''.2''' )
                    .replace('''_3''' , '''.3''' )
                    .replace('''_4''' , '''.4''' )
                    .replace('''_5''' , '''.5''' )
                    .replace('''_6''' , '''.6''' )
                    .replace('''_7''' , '''.7''' )
                    .replace('''_8''' , '''.8''' )
                    .replace('''_9''' , '''.9''' )
                )
        flax_key = '''.'''.join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    if len(missing_keys ) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            ''' use it for predictions and inference.''' )
    return pt_model
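# --- Added usage sketch (not part of the original file) ---
# Hypothetical call showing the intended entry point; the model instance and the
# checkpoint path are placeholders for whatever pair you are converting:
# pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "/path/to/diffusion_flax_model.msgpack")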
def abbr ( a : str , b : str ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
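    # --- Added illustration (not part of the original file): capitalize some of
    # a's lowercase letters and delete the rest to obtain b.
    print(abbr('daBcd' , 'ABC' ) )  # True  ("daBcd" -> "dABCd" -> "ABC")
    print(abbr('dBcd' , 'ABC' ) )   # False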
def _print_dist ( dist , v ):
    """simple docstring"""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float("inf" ):
                print(int(dist[i][j] ) , end="\t" )
            else:
                print("INF" , end="\t" )
        print()
def floyd_warshall ( graph , v ):
    """simple docstring"""
    dist = [[float("inf" ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float("inf" )
                    and dist[k][j] != float("inf" )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
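# --- Added non-interactive sketch (not part of the original script) ---
# The 3-vertex example below is illustrative; `floyd_warshall` prints the
# shortest-path matrix and returns (dist, v). Kept commented out because the
# module above prompts for input when executed:
# INF = float("inf")
# example_graph = [
#     [0.0, 2.0, INF],
#     [1.0, 0.0, INF],
#     [INF, INF, 0.0],
# ]
# dist, _ = floyd_warshall(example_graph, 3)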
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool ( PipelineTool ):
'''simple docstring'''
snake_case_ = "facebook/bart-large-mnli"
snake_case_ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
snake_case_ = "text_classifier"
snake_case_ = AutoTokenizer
snake_case_ = AutoModelForSequenceClassification
snake_case_ = ["text", ["text"]]
snake_case_ = ["text"]
    def setup ( self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
    def encode ( self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) ,[f'''This example is {label}''' for label in labels] ,return_tensors="pt" ,padding="max_length" ,)
    def decode ( self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
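# --- Added usage sketch (not part of the original file) ---
# Hypothetical: instantiating the tool downloads facebook/bart-large-mnli on
# first use, and the inputs are illustrative. Kept commented out since it
# requires the checkpoint to be available:
# classifier = TextClassificationTool()
# print(classifier("This movie was fantastic", ["positive", "negative"]))  # "positive"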
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ = """patrickvonplaten/t5-tiny-random"""
@require_torch
class A__ ( unittest.TestCase ):
@cached_property
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return AutoConfig.from_pretrained(lowerCamelCase__ )
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = create_student_by_copying_alternating_layers(lowerCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = create_student_by_copying_alternating_layers(lowerCamelCase__ , tempfile.mkdtemp() , e=1 , d=lowerCamelCase__ )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = create_student_by_copying_alternating_layers(lowerCamelCase__ , tempfile.mkdtemp() , e=1 , d=lowerCamelCase__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def a__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = create_student_by_copying_alternating_layers(lowerCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises(lowerCamelCase__ ):
create_student_by_copying_alternating_layers(lowerCamelCase__ , tempfile.mkdtemp() , e=lowerCamelCase__ , d=lowerCamelCase__ )
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def create_rename_keys (config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v (state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ (state_dict ):
    """simple docstring"""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key (dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img ():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint (model_name , pytorch_dump_folder_path , base_model=True ):
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_0_0_0
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('''facebookresearch/dino:main''' , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
__lowercase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 85
|
"""simple docstring"""
def lowerCAmelCase (__UpperCamelCase : int = 1_0_0_0 ):
"""simple docstring"""
__UpperCamelCase =-1
__UpperCamelCase =0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
__UpperCamelCase =(n * n - 2 * a * n) // (2 * n - 2 * a)
__UpperCamelCase =n - a - b
if c * c == (a * a + b * b):
__UpperCamelCase =a * b * c
if candidate >= product:
__UpperCamelCase =candidate
return product
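# A tiny brute-force cross-check (my addition): for small perimeters it enumerates all
# (a, b) pairs directly, which should agree with the algebraic shortcut above, where
# eliminating c from a**2 + b**2 = c**2 and a + b + c = n gives
# b = (n**2 - 2*a*n) / (2*n - 2*a).
def _brute_force_solution(n: int = 1_2) -> int:
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if c > 0 and a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best  # for n = 12 the only triplet is (3, 4, 5), so this returns 60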
if __name__ == "__main__":
print(f'''{solution() = }''')
| 85
| 1
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class lowerCamelCase :
'''simple docstring'''
_snake_case : str
_snake_case : str
_snake_case : Optional[str] = None
_snake_case : Optional[str] = None
_snake_case : Optional[str] = None
@dataclass(frozen=_snake_case )
class lowerCamelCase :
'''simple docstring'''
_snake_case : List[int]
_snake_case : Optional[List[int]] = None
_snake_case : Optional[List[int]] = None
_snake_case : Optional[Union[int, float]] = None
_snake_case : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : List[InputFeatures]
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase=False , _UpperCamelCase = False , ) -> Tuple:
UpperCAmelCase_ : List[Any] = hans_processors[task]()
UpperCAmelCase_ : Tuple = os.path.join(
_UpperCamelCase , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_UpperCamelCase ) , _UpperCamelCase , ) , )
UpperCAmelCase_ : Optional[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = label_list[2], label_list[1]
UpperCAmelCase_ : str = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ : Any = cached_features_file + '.lock'
with FileLock(_UpperCamelCase ):
if os.path.exists(_UpperCamelCase ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
UpperCAmelCase_ : Union[str, Any] = torch.load(_UpperCamelCase )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
UpperCAmelCase_ : List[Any] = (
processor.get_dev_examples(_UpperCamelCase ) if evaluate else processor.get_train_examples(_UpperCamelCase )
)
logger.info('Training examples: %s' , len(_UpperCamelCase ) )
UpperCAmelCase_ : Any = hans_convert_examples_to_features(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
logger.info('Saving features into cached file %s' , _UpperCamelCase )
torch.save(self.features , _UpperCamelCase )
def __len__( self ) -> int:
return len(self.features )
def __getitem__( self , _UpperCamelCase ) -> InputFeatures:
return self.features[i]
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self.label_list
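# A minimal sketch (my addition) of the cache-under-FileLock pattern used in __init__
# above: in distributed training, whichever process grabs the lock first builds and
# saves the features; the others block on the lock and then load the cached file.
# `cache_path` and `build_fn` are hypothetical names; this assumes torch, os and
# FileLock are importable here, as they are in this module.
def _cached_build_sketch(cache_path, build_fn, overwrite_cache=False):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite_cache:
            return torch.load(cache_path)
        features = build_fn()
        torch.save(features, cache_path)
        return features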
if is_tf_available():
import tensorflow as tf
class lowerCamelCase :
'''simple docstring'''
_snake_case : List[InputFeatures]
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1_2_8 , _UpperCamelCase=False , _UpperCamelCase = False , ) -> Optional[int]:
UpperCAmelCase_ : Dict = hans_processors[task]()
UpperCAmelCase_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = label_list[2], label_list[1]
UpperCAmelCase_ : str = label_list
UpperCAmelCase_ : List[str] = processor.get_dev_examples(_UpperCamelCase ) if evaluate else processor.get_train_examples(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = hans_convert_examples_to_features(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_UpperCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase_ : Tuple = tf.data.Dataset.from_generator(
_UpperCamelCase , (
{
'example_id': tf.int32,
'input_ids': tf.int32,
'attention_mask': tf.int32,
'token_type_ids': tf.int32,
},
tf.int64,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def __UpperCAmelCase ( self ) -> Optional[int]:
return self.dataset
def __len__( self ) -> int:
return len(self.features )
def __getitem__( self , _UpperCamelCase ) -> InputFeatures:
return self.features[i]
def __UpperCAmelCase ( self ) -> List[str]:
return self.label_list
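# A toy tf.data.Dataset.from_generator sketch (my addition) mirroring the construction
# above: the second and third arguments describe the (features_dict, label) structure
# the generator yields, with None for dimensions that vary between elements.
def _toy_hans_style_dataset():
    def gen():
        yield {"input_ids": [[101, 2023, 102]]}, 1
    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32}, tf.int64),
        ({"input_ids": tf.TensorShape([None, None])}, tf.TensorShape([])),
    )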
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
return self._create_examples(self._read_tsv(os.path.join(_UpperCamelCase , 'heuristics_train_set.txt' ) ) , 'train' )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(_UpperCamelCase , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def __UpperCAmelCase ( self ) -> Tuple:
return ["contradiction", "entailment", "neutral"]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : List[str] = []
for i, line in enumerate(_UpperCamelCase ):
if i == 0:
continue
UpperCAmelCase_ : Dict = '%s-%s' % (set_type, line[0])
UpperCAmelCase_ : Tuple = line[5]
UpperCAmelCase_ : Tuple = line[6]
UpperCAmelCase_ : Dict = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase_ : Tuple = line[0]
examples.append(InputExample(guid=_UpperCamelCase , text_a=_UpperCamelCase , text_b=_UpperCamelCase , label=_UpperCamelCase , pairID=_UpperCamelCase ) )
return examples
def lowercase__ ( __snake_case : List[InputExample] , __snake_case : List[str] , __snake_case : int , __snake_case : PreTrainedTokenizer , ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = {label: i for i, label in enumerate(__snake_case )}
UpperCAmelCase_ : Optional[int] = []
for ex_index, example in tqdm.tqdm(enumerate(__snake_case ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase_ : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__snake_case , max_length=__snake_case , padding='max_length' , truncation=__snake_case , return_overflowing_tokens=__snake_case , )
UpperCAmelCase_ : str = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase_ : List[str] = int(example.pairID )
features.append(InputFeatures(**__snake_case , label=__snake_case , pairID=__snake_case ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"guid: {example}" )
logger.info(F"features: {features[i]}" )
return features
__UpperCAmelCase = {
'hans': 3,
}
__UpperCAmelCase = {
'hans': HansProcessor,
}
| 29
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Conv2d(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
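# A tiny demonstration (my addition, not part of diffusers) of the horizontal seam
# blend implemented by blend_h above: across the overlap, the weight ramps linearly
# from the left tile to the right tile, so adjacent tiles join without a hard edge.
def _demo_blend_h() -> torch.Tensor:
    left = torch.zeros(1, 1, 1, 4)  # left tile: all zeros
    right = torch.ones(1, 1, 1, 4)  # right tile: all ones
    blend_extent = 4
    for x in range(blend_extent):
        right[:, :, :, x] = left[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + right[:, :, :, x] * (
            x / blend_extent
        )
    return right  # tensor([[[[0.00, 0.25, 0.50, 0.75]]]]): a smooth ramp into the right tile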
| 29
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_lowercase : Tuple = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : List[Any] )-> Dict:
lowerCamelCase__ : str =VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : int =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCamelCase__ : Dict =torch.manual_seed(0 )
lowerCamelCase__ : str =pipe(
image=lowerCamelCase, generator=lowerCamelCase, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy''', ).images
lowerCamelCase__ : Dict =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : List[Any] =np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 272
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_4bit_bnb_available,
is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase : Tuple = logging.getLogger(__name__)
def snake_case__ ( __lowerCamelCase : torch.nn.Module , __lowerCamelCase : BnbQuantizationConfig , __lowerCamelCase : Union[str, os.PathLike] = None , __lowerCamelCase : Optional[Dict[str, Union[int, str, torch.device]]] = None , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : Optional[Dict[Union[int, str], Union[int, str]]] = None , __lowerCamelCase : Optional[Union[str, os.PathLike]] = None , __lowerCamelCase : bool = False , ):
"""simple docstring"""
lowerCamelCase__ : str =bnb_quantization_config.load_in_4bit
lowerCamelCase__ : str =bnb_quantization_config.load_in_8bit
if load_in_8bit and not is_8bit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_4bit and not is_4bit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCamelCase__ : str =[]
# custom device map
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(device_map.keys() ) > 1:
lowerCamelCase__ : Union[str, Any] =[key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCamelCase__ : Any =get_keys_to_not_convert(__lowerCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_4bit:
bnb_quantization_config.skip_modules.extend(__lowerCamelCase )
lowerCamelCase__ : Tuple =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCamelCase__ : Optional[Any] =[]
lowerCamelCase__ : List[Any] =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowerCamelCase )
# compatibility with peft
lowerCamelCase__ : List[str] =load_in_4bit
lowerCamelCase__ : List[str] =load_in_8bit
lowerCamelCase__ : Union[str, Any] =get_parameter_device(__lowerCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCamelCase__ : str =replace_with_bnb_layers(__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
# convert param to the right dtype
lowerCamelCase__ : Union[str, Any] =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.float32 )
if param.dtype != torch.float32:
lowerCamelCase__ : Optional[int] =name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCamelCase__ : Dict =getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if param is not None:
param.to(torch.float32 )
elif torch.is_floating_point(__lowerCamelCase ):
param.to(__lowerCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
lowerCamelCase__ : Dict =replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
lowerCamelCase__ : Optional[int] =get_quantized_model_device_map(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_memory=__lowerCamelCase , no_split_module_classes=__lowerCamelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCamelCase__ : List[str] =True
lowerCamelCase__ : Dict =any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCamelCase , offload_state_dict=__lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(__lowerCamelCase , device_map=__lowerCamelCase , offload_dir=__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[int]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
lowerCamelCase__ : List[Any] ={'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCamelCase__ : List[Any] ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.float32
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCamelCase__ : int ={}
lowerCamelCase__ : Optional[int] =special_dtypes
lowerCamelCase__ : List[str] =no_split_module_classes
lowerCamelCase__ : Tuple =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCamelCase__ : List[str] =get_balanced_memory(
__lowerCamelCase , low_zero=(device_map == '''balanced_low_0''') , max_memory=__lowerCamelCase , **__lowerCamelCase , )
lowerCamelCase__ : str =max_memory
lowerCamelCase__ : Any =infer_auto_device_map(__lowerCamelCase , **__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
# check if don't have any quantized module on the cpu
lowerCamelCase__ : List[str] =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCamelCase__ : List[str] ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=None ):
"""simple docstring"""
if modules_to_not_convert is None:
lowerCamelCase__ : Dict =[]
lowerCamelCase__ , lowerCamelCase__ : List[Any] =_replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int=None , __lowerCamelCase : Optional[Any]=None , ):
"""simple docstring"""
lowerCamelCase__ : Tuple =False
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase__ : Optional[Any] =[]
current_key_name.append(__lowerCamelCase )
if isinstance(__lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCamelCase__ : Optional[Any] ='''.'''.join(__lowerCamelCase )
lowerCamelCase__ : Tuple =True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCamelCase__ : Any =False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_8bit:
lowerCamelCase__ : List[str] =bnb.nn.Linear8bitLt(
module.in_features , module.out_features , module.bias is not None , has_fp16_weights=__lowerCamelCase , threshold=bnb_quantization_config.llm_int8_threshold , )
elif bnb_quantization_config.load_in_4bit:
lowerCamelCase__ : str =bnb.nn.Linear4bit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowerCamelCase__ : Any =module.weight.data
if module.bias is not None:
lowerCamelCase__ : Any =module.bias.data
bnb_module.requires_grad_(__lowerCamelCase )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : str =True
if len(list(module.children() ) ) > 0:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =_replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Any =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
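# A simplified sketch (my addition) of the recursive replacement above: walk
# named_children, swap any nn.Linear whose dotted path does not match a skipped
# module, and recurse into everything else. `make_quantized` is a hypothetical
# factory standing in for the bnb.nn.Linear8bitLt / bnb.nn.Linear4bit construction.
def _replace_linears_sketch(model, make_quantized, skip=None, prefix=""):
    for name, child in model.named_children():
        path = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and all(key not in path for key in (skip or [])):
            setattr(model, name, make_quantized(child))
        else:
            _replace_linears_sketch(child, make_quantized, skip, path)
    return model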
def snake_case__ ( __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
# Create a copy of the model
with init_empty_weights():
lowerCamelCase__ : Optional[Any] =deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCamelCase__ : Union[str, Any] =find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : List[str] =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCamelCase__ : Any =sum(__lowerCamelCase , [] )
lowerCamelCase__ : Any =len(__lowerCamelCase ) > 0
# Check if it is a base model
lowerCamelCase__ : Optional[Any] =False
if hasattr(__lowerCamelCase , '''base_model_prefix''' ):
lowerCamelCase__ : Dict =not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase__ : List[str] =list(model.named_children() )
lowerCamelCase__ : Any =[list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase__ : Optional[Any] =set(__lowerCamelCase ) - set(__lowerCamelCase )
lowerCamelCase__ : List[str] =list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
lowerCamelCase__ : Optional[Any] =['''.weight''', '''.bias''']
lowerCamelCase__ : List[Any] =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase__ : Union[str, Any] =name.replace(__lowerCamelCase , '''''' )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
for m in model.modules():
if isinstance(__lowerCamelCase , bnb.nn.Linear4bit ):
return True
return False
def snake_case__ ( __lowerCamelCase : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , 0 , dtype=__lowerCamelCase , value=__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =param_name
lowerCamelCase__ : Dict =model
if "." in tensor_name:
lowerCamelCase__ : Optional[int] =tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCamelCase__ : Union[str, Any] =getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
lowerCamelCase__ : Union[str, Any] =new_module
lowerCamelCase__ : List[Any] =splits[-1]
# offload weights
lowerCamelCase__ : Optional[Any] =False
offload_weight(module._parameters[tensor_name] , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase , )
else:
offload_weight(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
offload_weight(__lowerCamelCase , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase )
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , '''meta''' , dtype=__lowerCamelCase , value=torch.empty(*param.size() ) )
| 272
| 1
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__lowerCAmelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__lowerCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
__lowerCAmelCase : set[int] = {ord(char) for char in VALID_CHARS}
__lowerCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a__ ( A_, A_ ):
'''simple docstring'''
__magic_name__ = ""
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
for keychar, cipherchar in zip(cycle(A_ ), A_ ):
__magic_name__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(A_ )
return decoded
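# A small illustration (my addition) of why the XOR decode in try_key works: XOR with
# the same repeating key is its own inverse, so encryption and decryption are the
# same operation.
def _xor_roundtrip_demo(plaintext: str = "the cat", key: str = "abc") -> bool:
    ciphertext = [ord(p) ^ ord(k) for p, k in zip(plaintext, cycle(key))]
    decoded = "".join(chr(c ^ ord(k)) for c, k in zip(ciphertext, cycle(key)))
    return decoded == plaintext  # always True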
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = []
for key in product(A_, repeat=3 ):
__magic_name__ = try_key(A_, A_ )
if encoded is not None:
possibles.append(A_ )
return possibles
def a__ ( A_, A_ ):
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def a__ ( A_ = "p059_cipher.txt" ):
'''simple docstring'''
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = Path(A_ ).parent.joinpath(A_ ).read_text(encoding="""utf-8""" )
__magic_name__ = [int(A_ ) for number in data.strip().split(""",""" )]
__magic_name__ = filter_valid_chars(A_ )
for common_word in COMMON_WORDS:
__magic_name__ = filter_common_word(A_, A_ )
if len(A_ ) == 1:
break
__magic_name__ = possibles[0]
return sum(ord(A_ ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 88
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self : List[str] , UpperCamelCase__ : int ) -> str:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
__magic_name__ = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__magic_name__ = """sgugger/tiny-distilbert-classification"""
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Any ) -> List[Any]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__magic_name__ = """patrickvonplaten/t5-tiny-random"""
__magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , configs=[config] )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , """env.csv""" ) , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , """env.csv""" ) ).exists() )
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(UpperCamelCase__ : Dict ):
self.assertTrue(hasattr(UpperCamelCase__ , """sequential""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """cumulative""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """current""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , """log.txt""" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , """log.txt""" ) ).exists() )
| 88
| 1
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__magic_name__ = ['''torch''', '''scipy''']
def __init__( self , *__snake_case , **__snake_case ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''torch''', '''scipy'''] )
| 359
|
import math
class A__ :
"""simple docstring"""
def a_ ( self , __snake_case , __snake_case ):
da = 0.0
db = 0.0
for i in range(len(__snake_case ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
db += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > db else 1
return 0
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case ):
for i in range(len(__snake_case ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def UpperCAmelCase__ ():
"""simple docstring"""
snake_case = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
snake_case = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
snake_case = SelfOrganizingMap()
snake_case = 3
snake_case = 0.5
for _ in range(UpperCamelCase_ ):
for j in range(len(UpperCamelCase_ ) ):
# training sample
snake_case = training_samples[j]
# Compute the winning vector
snake_case = self_organizing_map.get_winner(UpperCamelCase_ ,UpperCamelCase_ )
# Update the winning vector
snake_case = self_organizing_map.update(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
# classify test sample
snake_case = [0, 0, 0, 1]
snake_case = self_organizing_map.get_winner(UpperCamelCase_ ,UpperCamelCase_ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
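# A worked example (my addition) of the distance computation in get_winner above,
# for the first training sample [1, 1, 0, 0] against the initial weights:
#   d(weights[0]) = 0.8**2 + 0.4**2 + 0.5**2 + 0.9**2 = 0.64 + 0.16 + 0.25 + 0.81 = 1.86
#   d(weights[1]) = 0.2**2 + 0.6**2 + 0.7**2 + 0.3**2 = 0.04 + 0.36 + 0.49 + 0.09 = 0.98
# weights[1] is the closer vector, so it is the natural winner for this sample, and
# only that winning row is then nudged toward the sample by the update rule.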
| 213
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
super().__init__()
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , ) -> Optional[Any]:
if audio_length_in_s is None:
UpperCamelCase :List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCamelCase :List[str] = audio_length_in_s * self.unet.config.sample_rate
UpperCamelCase :str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
UpperCamelCase :Optional[Any] = int(__lowerCAmelCase )
if sample_size % down_scale_factor != 0:
UpperCamelCase :Any = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
''' process.''' )
UpperCamelCase :Tuple = int(__lowerCAmelCase )
UpperCamelCase :Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
UpperCamelCase :Any = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase :int = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=__lowerCAmelCase )
# set step values
self.scheduler.set_timesteps(__lowerCAmelCase , device=audio.device )
UpperCamelCase :List[str] = self.scheduler.timesteps.to(__lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase :Dict = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCamelCase :Dict = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
UpperCamelCase :str = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCamelCase :Dict = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__lowerCAmelCase )
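# A standalone sketch (my addition) of the length-rounding logic in __call__ above:
# the requested sample count is rounded up to the next multiple of the model's total
# downsampling factor (2 ** number of up blocks) so every block halves/doubles cleanly.
def _round_up_sample_size(requested_size: int, down_scale_factor: int) -> int:
    if requested_size % down_scale_factor == 0:
        return requested_size
    return (requested_size // down_scale_factor + 1) * down_scale_factor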
| 259
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowerCAmelCase : Dict =version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :tuple , lowerCAmelCase__ :Path , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str]=False , ) -> Union[str, Any]:
'''simple docstring'''
output_path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , enable_onnx_checker=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
else:
export(
lowerCAmelCase__ , lowerCAmelCase__ , f=output_path.as_posix() , input_names=lowerCAmelCase__ , output_names=lowerCAmelCase__ , dynamic_axes=lowerCAmelCase__ , do_constant_folding=lowerCAmelCase__ , opset_version=lowerCAmelCase__ , )
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :bool = False ) -> str:
'''simple docstring'''
lowercase = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
lowercase = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
lowercase = """cpu"""
lowercase = Path(lowerCAmelCase__ )
# VAE DECODER
lowercase = AutoencoderKL.from_pretrained(model_path + """/vae""" )
lowercase = vae_decoder.config.latent_channels
# forward only through the decoder part
lowercase = vae_decoder.decode
onnx_export(
lowerCAmelCase__ , model_args=(
torch.randn(1 , lowerCAmelCase__ , 2_5 , 2_5 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=lowerCAmelCase__ , )
del vae_decoder
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=1_4,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
__lowerCAmelCase : Dict =parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("""SD: Done: ONNX""")
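# Example invocation (my addition; the script name and checkpoint path are hypothetical):
#   python convert_vae_decoder_to_onnx.py --model_path ./stable-diffusion-checkpoint \
#       --output_path ./sd_onnx --opset 14 --fp16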
| 197
| 0
|
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow's console logging
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 365
|
def __UpperCamelCase ( lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for ch in input_str:
lowerCAmelCase_ : Any = ord(lowercase__ )
lowerCAmelCase_ : Dict = pow(2 , lowercase__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
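# A worked example (my addition) of the bitmap check above: for "ab" the loop sets bit
# 97 (ord("a")) and then bit 98; for "aa" the second "a" finds bit 97 already set and
# the function returns False. A set-based cross-check of the same property:
def _all_chars_unique_via_set(input_str: str) -> bool:
    return len(set(input_str)) == len(input_str)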
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
| 0
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : int = 3_2 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[int, float] = 1 / 2_5_5 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowerCAmelCase : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Any=7 , _lowerCAmelCase : Any=3_0 , _lowerCAmelCase : Dict=4_0_0 , _lowerCAmelCase : List[Any]=3 , ):
'''simple docstring'''
__lowercase =parent
__lowercase =do_resize
__lowercase =size if size is not None else {'shortest_edge': 2_8_8}
__lowercase =size_divisor
__lowercase =do_rescale
__lowercase =rescale_factor
__lowercase =do_normalize
__lowercase =do_center_crop
__lowercase =image_mean
__lowercase =image_std
__lowercase =do_pad
__lowercase =batch_size
__lowercase =num_channels
__lowercase =min_resolution
__lowercase =max_resolution
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=False):
'''simple docstring'''
if not batched:
__lowercase =self.size['shortest_edge']
__lowercase =image_inputs[0]
if isinstance(_lowerCAmelCase , Image.Image):
__lowercase , __lowercase =image.size
else:
__lowercase , __lowercase =image.shape[1], image.shape[2]
__lowercase =size / min(_lowerCAmelCase , _lowerCAmelCase)
if h < w:
__lowercase , __lowercase =size, scale * w
else:
__lowercase , __lowercase =scale * h, size
__lowercase =int((1_3_3_3 / 8_0_0) * size)
if max(_lowerCAmelCase , _lowerCAmelCase) > max_size:
__lowercase =max_size / max(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =newh * scale
__lowercase =neww * scale
__lowercase , __lowercase =int(newh + 0.5), int(neww + 0.5)
__lowercase , __lowercase =(
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__lowercase =[]
for image in image_inputs:
__lowercase , __lowercase =self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
__lowercase =max(_lowerCAmelCase , key=lambda _lowerCAmelCase: item[0])[0]
__lowercase =max(_lowerCAmelCase , key=lambda _lowerCAmelCase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = BridgeTowerImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =BridgeTowerImageProcessingTester(self)
@property
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean'))
self.assertTrue(hasattr(_lowerCAmelCase , 'image_std'))
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize'))
self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize'))
self.assertTrue(hasattr(_lowerCAmelCase , 'size'))
self.assertTrue(hasattr(_lowerCAmelCase , 'size_divisor'))
def __lowerCamelCase ( self : str):
'''simple docstring'''
pass
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowercase =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image)
# Test not batched input
__lowercase =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(_lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase =image_processing(_lowerCAmelCase , return_tensors='pt').pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__lowercase =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray)
# Test not batched input
__lowercase =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(_lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase =image_processing(_lowerCAmelCase , return_tensors='pt').pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__lowercase =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor)
# Test not batched input
__lowercase =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(_lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase =image_processing(_lowerCAmelCase , return_tensors='pt').pixel_values
__lowercase , __lowercase =self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
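# --- Added example (not part of the original tests) ---
# A hedged sketch of the resizing rule that get_expected_values above mirrors:
# the shortest edge is scaled to `size`, then both sides are rounded down to a
# multiple of `size_divisor`. The exact output shape below is for this input.
from PIL import Image
from transformers import BridgeTowerImageProcessor

demo_processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32)
demo_image = Image.new("RGB", (640, 480))  # width 640, height 480
demo_pixels = demo_processor(images=demo_image, return_tensors="pt").pixel_values
print(demo_pixels.shape)  # expected torch.Size([1, 3, 288, 384])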
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _UpperCamelCase ( A ):
'''simple docstring'''
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =tempfile.mkdtemp()
__lowercase =5
# Realm tok
__lowercase =[
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase =os.path.join(self.tmpdirname , 'realm_tokenizer')
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
__lowercase =os.path.join(_lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
__lowercase =os.path.join(self.tmpdirname , 'realm_block_records')
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =RealmConfig(num_block_records=self.num_block_records)
return config
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
] , dtype=_lowerCAmelCase , )
return block_records
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
    def __lowerCamelCase ( self : int):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth'], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np')
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]), ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'], )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]), ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'], )
    def __lowerCamelCase ( self : Dict):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np')
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def __lowerCamelCase ( self : Tuple):
        '''simple docstring'''
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        self.assertEqual(retriever.block_records[0], B'This is the first record')
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, 'realm_block_records'), _REALM_BLOCK_RECORDS_FILENAME)
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
        self.assertEqual(retriever.block_records[0], B'This is the first record')
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer
    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )
    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))
    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
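# --- Added example (not part of the original script) ---
# The tokenize() function above relies on datasets.ClassLabel to map the
# complexity-class strings to integer ids; a minimal standalone sketch
# (the class names here are made up for illustration):
from datasets import ClassLabel

demo_labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
print(demo_labels.str2int("linear"))  # 1
print(demo_labels.int2str(2))         # "quadratic"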
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.")
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}")
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
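# --- Added example (not part of the original module) ---
# A minimal sketch of calling write_basic_config() above directly instead of
# going through `accelerate config default`; the save location is an
# assumption chosen for illustration.
from pathlib import Path

saved_path = write_basic_config(mixed_precision="fp16", save_location="/tmp/accelerate_default.json")
if saved_path:
    print(Path(saved_path).read_text())  # JSON with compute_environment, num_processes, ...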
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
SCREAMING_SNAKE_CASE_ : str = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
SCREAMING_SNAKE_CASE_ : str = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=encoder_config, decoder_config=decoder_config, )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
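# --- Added example (not part of the original script) ---
# Hedged sketch of loading the assembled checkpoint back for inference; the
# directory name is a placeholder for whatever --output_dir was used above.
from transformers import AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel

reloaded_model = FlaxVisionEncoderDecoderModel.from_pretrained("my-vision-encoder-decoder")
reloaded_processor = AutoImageProcessor.from_pretrained("my-vision-encoder-decoder")
reloaded_tokenizer = AutoTokenizer.from_pretrained("my-vision-encoder-decoder")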
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__ :
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=32 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : int=True , __lowerCamelCase : int=True , __lowerCamelCase : Any=32 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Tuple=[0, 1, 2, 3] , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Union[str, Any]=37 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : List[Any]=[1, 3_84, 24, 24] , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=None , ) -> Optional[int]:
a = parent
a = batch_size
a = image_size
a = patch_size
a = num_channels
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = backbone_out_indices
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = num_labels
a = backbone_featmap_shape
a = scope
a = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
a = (image_size // patch_size) ** 2
a = num_patches + 1
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 1_92, 3_84, 7_68],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__lowerCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
a = DPTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any ) -> str:
a = self.num_labels
a = DPTForDepthEstimation(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple ) -> Any:
a = self.num_labels
a = DPTForSemanticSegmentation(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCAmelCase ( self : str ) -> str:
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : str = False
def __UpperCAmelCase ( self : int ) -> List[str]:
a = DPTModelTester(self )
a = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def __UpperCAmelCase ( self : int ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> Dict:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
if model_class in get_values(__lowerCamelCase ):
continue
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
a = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
a = model(**__lowerCamelCase ).loss
loss.backward()
def __UpperCAmelCase ( self : Dict ) -> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = False
a = True
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
a = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
a = model(**__lowerCamelCase ).loss
loss.backward()
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
a = model_class(config=__lowerCamelCase )
# Skip the check for the backbone
a = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
a = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@slow
def __UpperCAmelCase ( self : str ) -> Optional[int]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
a = DPTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = "add"
with self.assertRaises(__lowerCamelCase ):
a = DPTForDepthEstimation(__lowerCamelCase )
def __magic_name__ ( ):
'''simple docstring'''
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
a = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
a = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(__lowerCamelCase )
a = prepare_img()
a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCamelCase )
a = outputs.predicted_depth
# verify the predicted depth
a = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __lowerCamelCase )
a = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __lowerCamelCase , atol=1e-4 ) )
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCamelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[jnp.ndarray] = None
SCREAMING_SNAKE_CASE_ : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def lowerCamelCase__ ( cls ):
return cls()
@dataclass
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : jnp.ndarray
SCREAMING_SNAKE_CASE_ : jnp.ndarray
SCREAMING_SNAKE_CASE_ : KarrasVeSchedulerState
class UpperCamelCase ( snake_case , snake_case ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self ):
return True
@register_to_config
def __init__( self ,UpperCAmelCase_ = 0.02 ,UpperCAmelCase_ = 1_00 ,UpperCAmelCase_ = 1.007 ,UpperCAmelCase_ = 80 ,UpperCAmelCase_ = 0.05 ,UpperCAmelCase_ = 50 ,):
pass
def lowerCamelCase__ ( self ):
return KarrasVeSchedulerState.create()
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = () ):
_lowercase : Dict = jnp.arange(0 ,UpperCAmelCase_ )[::-1].copy()
_lowercase : str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=UpperCAmelCase_ ,schedule=jnp.array(UpperCAmelCase_ ,dtype=jnp.floataa ) ,timesteps=UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
if self.config.s_min <= sigma <= self.config.s_max:
_lowercase : Any = min(self.config.s_churn / state.num_inference_steps ,2**0.5 - 1 )
else:
_lowercase : str = 0
# sample eps ~ N(0, S_noise^2 * I)
_lowercase : Union[str, Any] = random.split(UpperCAmelCase_ ,num=1 )
_lowercase : str = self.config.s_noise * random.normal(key=UpperCAmelCase_ ,shape=sample.shape )
_lowercase : Optional[Any] = sigma + gamma * sigma
_lowercase : int = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Any = sample_hat + sigma_hat * model_output
_lowercase : List[str] = (sample_hat - pred_original_sample) / sigma_hat
_lowercase : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=UpperCAmelCase_ ,derivative=UpperCAmelCase_ ,state=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Any = sample_prev + sigma_prev * model_output
_lowercase : Optional[Any] = (sample_prev - pred_original_sample) / sigma_prev
_lowercase : Tuple = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=UpperCAmelCase_ ,derivative=UpperCAmelCase_ ,state=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
raise NotImplementedError()
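# --- Added sketch (not part of the original scheduler) ---
# The two update methods above implement the Euler predictor and Heun
# corrector of Karras et al. (2022). A compressed NumPy rendering for
# orientation, with `denoise` standing in for the trained model:
import numpy as np

def heun_step_sketch(denoise, sample_hat, sigma_hat, sigma_prev):
    pred_x0 = sample_hat + sigma_hat * denoise(sample_hat, sigma_hat)
    derivative = (sample_hat - pred_x0) / sigma_hat
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative            # step()
    pred_x0_2 = sample_prev + sigma_prev * denoise(sample_prev, sigma_prev)
    derivative_2 = (sample_prev - pred_x0_2) / sigma_prev
    return sample_hat + (sigma_prev - sigma_hat) * 0.5 * (derivative + derivative_2)  # step_correct()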
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : str=99 , __UpperCAmelCase : Optional[int]=32 , __UpperCAmelCase : Any=5 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : List[Any]=37 , __UpperCAmelCase : str="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : List[Any]=0.02 , __UpperCAmelCase : Union[str, Any]=4 , ) ->str:
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__UpperCAmelCase , )
return config, input_ids, attention_mask
def __lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
a = FlaxDistilBertModelTester(self )
@slow
def __lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained('''distilbert-base-uncased''' )
a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
@require_flax
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
a = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
a = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
a = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
a = (1, 11, 768)
self.assertEqual(output.shape , __UpperCAmelCase )
a = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1e-4 ) )
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits, as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
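# --- Added example (not part of the original file) ---
# For 2 bits the sequence below differs by exactly one bit between
# neighbours: 00 -> 01 -> 11 -> 10.
print(gray_code(2))  # [0, 1, 3, 2]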
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Optional[Any] = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
UpperCamelCase__ = 'distilbert'
UpperCamelCase__ = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self , _a=30_522 , _a=512 , _a=False , _a=6 , _a=12 , _a=768 , _a=4 * 768 , _a=0.1 , _a=0.1 , _a="gelu" , _a=0.02 , _a=0.1 , _a=0.2 , _a=0 , **_a , ):
__magic_name__ : int = vocab_size
__magic_name__ : str = max_position_embeddings
__magic_name__ : Union[str, Any] = sinusoidal_pos_embds
__magic_name__ : str = n_layers
__magic_name__ : Optional[int] = n_heads
__magic_name__ : str = dim
__magic_name__ : List[str] = hidden_dim
__magic_name__ : Dict = dropout
__magic_name__ : Optional[Any] = attention_dropout
__magic_name__ : int = activation
__magic_name__ : Optional[int] = initializer_range
__magic_name__ : List[str] = qa_dropout
__magic_name__ : Tuple = seq_classif_dropout
super().__init__(**_a , pad_token_id=_a )
class _snake_case ( snake_case ):
@property
def SCREAMING_SNAKE_CASE ( self ):
if self.task == "multiple-choice":
__magic_name__ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__magic_name__ : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself (e.g. 76**2 = 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
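# --- Added example (not part of the original file) ---
# Automorphic numbers reproduce themselves as the trailing digits of their
# square: 5**2 = 25, 76**2 = 5776.
print(is_automorphic_number(5))   # True
print(is_automorphic_number(76))  # True
print(is_automorphic_number(7))   # False (7**2 = 49)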
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
A_ :Any = logging.get_logger(__name__)
A_ :List[str] = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : List[str] ="""imagegpt"""
UpperCamelCase__ : int =["""past_key_values"""]
UpperCamelCase__ : Optional[int] ={
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowerCamelCase__=512 + 1 , lowerCamelCase__=32 * 32 , lowerCamelCase__=512 , lowerCamelCase__=24 , lowerCamelCase__=8 , lowerCamelCase__=None , lowerCamelCase__="quick_gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1E-5 , lowerCamelCase__=0.02 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : Any =vocab_size
__UpperCamelCase : Optional[Any] =n_positions
__UpperCamelCase : List[Any] =n_embd
__UpperCamelCase : Optional[Any] =n_layer
__UpperCamelCase : Dict =n_head
__UpperCamelCase : Union[str, Any] =n_inner
__UpperCamelCase : Union[str, Any] =activation_function
__UpperCamelCase : List[Any] =resid_pdrop
__UpperCamelCase : int =embd_pdrop
__UpperCamelCase : Tuple =attn_pdrop
__UpperCamelCase : Optional[Any] =layer_norm_epsilon
__UpperCamelCase : Tuple =initializer_range
__UpperCamelCase : Any =scale_attn_weights
__UpperCamelCase : int =use_cache
__UpperCamelCase : Union[str, Any] =scale_attn_by_inverse_layer_idx
__UpperCamelCase : int =reorder_and_upcast_attn
__UpperCamelCase : List[str] =tie_word_embeddings
super().__init__(tie_word_embeddings=lowerCamelCase__ , **lowerCamelCase__ )
class __A ( a ):
"""simple docstring"""
@property
def __lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
] )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = 1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 3 , lowerCamelCase__ = 32 , lowerCamelCase__ = 32 , ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self._generate_dummy_images(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Any =dict(preprocessor(images=lowerCamelCase__ , return_tensors=lowerCamelCase__ ) )
return inputs
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert the 4 bytes of a 32-char bit string into little-endian order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format an integer as 8 little-endian hex digits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Expand the message to a bit string and apply MD5 padding."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Yield each 512-bit block as a list of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT over 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit value left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Compute the MD5 digest of `message`, returned as hex bytes."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
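# --- Added example (not part of the original file) ---
# Quick sanity check against the well-known MD5 digest of "hello world".
if __name__ == "__main__":
    print(md5_me(b"hello world"))  # b'5eb63bbbe01eeed093cb22bb8f5acdc3'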
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
UpperCamelCase__ = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
UpperCamelCase__ = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict)
        return score
| 360
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : str = (32, 32)
UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 4
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : List[str] = (32, 32)
UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return (4, 32, 32)
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (4, 32, 32)
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
UpperCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
UpperCAmelCase__ : Tuple = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : Union[str, Any] = noise.to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A )
UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample''']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1e-3 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : str = noise.to(_A )
UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) )
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any , _A : str=(32, 32) ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : List[str] = self.dummy_input
UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A )
UpperCAmelCase__ : Optional[Any] = noise
UpperCAmelCase__ : Any = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : Dict = (256, 256)
UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : int = (32, 32)
UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : int = model(_A , _A ).sample
UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
| 299
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : Tuple = "ctrl"
lowerCAmelCase_ : List[str] = ["past_key_values"]
lowerCAmelCase_ : int = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
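    # `attribute_map` (a `PretrainedConfig` feature) aliases the standard config names to the
    # CTRL-specific ones, so e.g. reading `config.hidden_size` returns the stored `n_embd` value.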
def __init__( self , a__=246_534 , a__=256 , a__=1_280 , a__=8_192 , a__=48 , a__=16 , a__=0.1 , a__=0.1 , a__=1e-6 , a__=0.0_2 , a__=True , **a__ , ) -> Any:
'''simple docstring'''
snake_case_ = vocab_size
snake_case_ = n_positions
snake_case_ = n_embd
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = dff
snake_case_ = resid_pdrop
snake_case_ = embd_pdrop
snake_case_ = layer_norm_epsilon
snake_case_ = initializer_range
snake_case_ = use_cache
super().__init__(**a__ )
| 85
|
'''simple docstring'''
import os
_SCREAMING_SNAKE_CASE : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
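# Parsing rule: scan left to right and subtract a symbol's value when it is smaller than its
# right neighbour, otherwise add it. E.g. "MCMXC" -> 1000 - 100 + 1000 - 10 + 100 = 1990.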
def parse_roman_numerals( numerals : str ) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals( num : int ) -> str:
    '''simple docstring'''
    numerals = ""
    m_count = num // 1_0_0_0
    numerals += m_count * "M"
    num %= 1_0_0_0
    c_count = num // 1_0_0
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_0_0
    x_count = num // 1_0
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 1_0
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
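# Worked example: 1990 -> "M" (one thousand) + "CM" (c_count == 9) + "XC" (x_count == 9) = "MCMXC".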
def solution( roman_numerals_filename : str = "/p089_roman.txt" ) -> int:
    '''simple docstring'''
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        parsed = parse_roman_numerals(original )
        shortened = generate_roman_numerals(parsed )
        savings += len(original ) - len(shortened )
    return savings
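# Each input line holds a (possibly non-minimal) numeral; the saving is the length difference
# after round-tripping it, e.g. "IIIIIIIIII" (10 chars) -> 10 -> "X" saves 9 characters.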
if __name__ == "__main__":
print(F"{solution() = }")
| 85
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=a_ )
class _lowerCamelCase ( a_ ):
_lowerCamelCase :str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
_lowerCamelCase :ClassVar[Features] = Features({"audio": Audio()} )
_lowerCamelCase :ClassVar[Features] = Features({"labels": ClassLabel} )
_lowerCamelCase :str = "audio"
_lowerCamelCase :str = "labels"
def _lowerCAmelCase ( self : str , UpperCamelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , UpperCamelCase ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
lowerCAmelCase__ : str = copy.deepcopy(self )
lowerCAmelCase__ : Optional[int] = self.label_schema.copy()
lowerCAmelCase__ : List[Any] = features[self.label_column]
lowerCAmelCase__ : Optional[int] = label_schema
return task_template
@property
def _lowerCAmelCase ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 212
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=a_ )
class _lowerCamelCase ( a_ ):
_lowerCamelCase :str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
_lowerCamelCase :ClassVar[Features] = Features({"audio": Audio()} )
_lowerCamelCase :ClassVar[Features] = Features({"labels": ClassLabel} )
_lowerCamelCase :str = "audio"
_lowerCamelCase :str = "labels"
def _lowerCAmelCase ( self : str , UpperCamelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , UpperCamelCase ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
lowerCAmelCase__ : str = copy.deepcopy(self )
lowerCAmelCase__ : Optional[int] = self.label_schema.copy()
lowerCAmelCase__ : List[Any] = features[self.label_column]
lowerCAmelCase__ : Optional[int] = label_schema
return task_template
@property
def _lowerCAmelCase ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 212
| 1
|
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_DOCS = '''docs/source/en'''
REPO_PATH = '''.'''
def _find_text_in_file ( filename : str , start_prompt : str , end_prompt : str ):
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_re_flax_models = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split ( identifier : str ) -> list:
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , identifier )
    return [m.group(0 ) for m in matches]
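# e.g. camel_case_split('CamelCaseXYZTest') -> ['Camel', 'Case', 'XYZ', 'Test']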
def _center_text ( text : str , width : int ) -> str:
    text_length = 2 if text == '✅' or text == '❌' else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules ( ) -> str:
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer and a backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's loop through all transformers objects (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith('Tokenizer' ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast' ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = '|' + '|'.join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + '|\n'
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
    check = {True: '✅', False: '❌'}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def check_model_table ( overwrite : bool = False ):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowercase__ : Union[str, Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 190
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''image_processor''', '''tokenizer''']
lowerCAmelCase = '''BlipImageProcessor'''
lowerCAmelCase = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = False
super().__init__(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = self.image_processor
def __call__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None:
__A : int = self.tokenizer
__A : Optional[Any] = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
return text_encoding
# add pixel_values
__A : List[Any] = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase)
if text is not None:
__A : Optional[Any] = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
else:
__A : int = None
if text_encoding is not None:
encoding_image_processor.update(_UpperCAmelCase)
return encoding_image_processor
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.tokenizer.model_input_names
__A : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 190
| 1
|
"""simple docstring"""
def harmonic_series ( n_term : str ) -> list:
    if n_term == "":
        return []
    series : list = []
    for temp in range(int(n_term ) ):
        series.append(F'''1/{temp + 1}''' if series else """1""" )
    return series
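# e.g. harmonic_series("3") -> ['1', '1/2', '1/3']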
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 363
|
"""simple docstring"""
def set_bit ( number : int , position : int ) -> int:
    return number | (1 << position)
def clear_bit ( number : int , position : int ) -> int:
    return number & ~(1 << position)
def flip_bit ( number : int , position : int ) -> int:
    return number ^ (1 << position)
def is_bit_set ( number : int , position : int ) -> bool:
    return ((number >> position) & 1) == 1
def get_bit ( number : int , position : int ) -> int:
    return int((number & (1 << position)) != 0 )
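# Worked examples (binary shown for clarity):
#   set_bit(0b1101, 1)    -> 0b1111 (15)
#   clear_bit(0b1111, 1)  -> 0b1101 (13)
#   flip_bit(0b1101, 1)   -> 0b1111 (15)
#   is_bit_set(0b1010, 1) -> True
#   get_bit(0b1010, 0)    -> 0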
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 0
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
__A = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
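# The "*" in the mapped keys above is a placeholder for the encoder layer index; it is filled
# in by `recursively_load_weights` below from the matching fairseq parameter name.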
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
for attribute in key.split("." ):
__lowerCAmelCase: Optional[int] = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
__lowerCAmelCase: Optional[int] = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
__lowerCAmelCase: Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
__lowerCAmelCase: Dict = value
elif weight_type == "weight_g":
__lowerCAmelCase: List[str] = value
elif weight_type == "weight_v":
__lowerCAmelCase: Any = value
elif weight_type == "bias":
__lowerCAmelCase: List[str] = value
else:
__lowerCAmelCase: int = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
__lowerCAmelCase: int = []
__lowerCAmelCase: List[Any] = fairseq_model.state_dict()
__lowerCAmelCase: Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase: Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
__lowerCAmelCase: List[str] = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase: Dict = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCAmelCase: List[str] = True
if "*" in mapped_key:
__lowerCAmelCase: Dict = name.split(__lowerCAmelCase )[0].split("." )[-2]
__lowerCAmelCase: Optional[int] = mapped_key.replace("*" , __lowerCAmelCase )
if "weight_g" in name:
__lowerCAmelCase: Optional[Any] = "weight_g"
elif "weight_v" in name:
__lowerCAmelCase: Any = "weight_v"
elif "bias" in name:
__lowerCAmelCase: int = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase: Tuple = "weight"
else:
__lowerCAmelCase: List[Any] = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
__lowerCAmelCase: Dict = full_name.split("conv_layers." )[-1]
__lowerCAmelCase: List[str] = name.split("." )
__lowerCAmelCase: List[Any] = int(items[0] )
__lowerCAmelCase: Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__lowerCAmelCase: Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__lowerCAmelCase: Dict = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
__lowerCAmelCase: str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
__lowerCAmelCase: List[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
if config_path is not None:
__lowerCAmelCase: str = UniSpeechSatConfig.from_pretrained(__lowerCAmelCase )
else:
__lowerCAmelCase: List[str] = UniSpeechSatConfig()
__lowerCAmelCase: Optional[Any] = ""
if is_finetuned:
__lowerCAmelCase: str = UniSpeechSatForCTC(__lowerCAmelCase )
else:
__lowerCAmelCase: Optional[Any] = UniSpeechSatForPreTraining(__lowerCAmelCase )
__lowerCAmelCase: List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
__lowerCAmelCase: Union[str, Any] = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 217
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Optional[int] =logging.get_logger()
@dataclass
class __a :
_lowerCAmelCase : nn.Module
_lowerCAmelCase : List[nn.Module] = field(default_factory=A__ )
_lowerCAmelCase : list = field(default_factory=A__ )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tensor , SCREAMING_SNAKE_CASE : Tensor ):
'''simple docstring'''
UpperCamelCase__ : Dict = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE , nn.BatchNormad )
if has_not_submodules:
self.traced.append(SCREAMING_SNAKE_CASE )
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tensor ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(SCREAMING_SNAKE_CASE )
[x.remove() for x in self.handles]
return self
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return list(filter(lambda SCREAMING_SNAKE_CASE : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
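# `Tracker` registers a forward hook on every submodule, runs a single forward pass, and records
# the leaf modules (plus Conv2d/BatchNorm2d) in execution order; `parametrized` then keeps only
# the traced modules that carry parameters, which is what `ModuleTransfer` aligns pairwise below.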
@dataclass
class __a :
_lowerCAmelCase : nn.Module
_lowerCAmelCase : nn.Module
_lowerCAmelCase : int = 0
_lowerCAmelCase : List = field(default_factory=A__ )
_lowerCAmelCase : List = field(default_factory=A__ )
def __call__( self : Any , SCREAMING_SNAKE_CASE : Tensor ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = Tracker(self.dest )(SCREAMING_SNAKE_CASE ).parametrized
UpperCamelCase__ : Any = Tracker(self.src )(SCREAMING_SNAKE_CASE ).parametrized
UpperCamelCase__ : str = list(filter(lambda SCREAMING_SNAKE_CASE : type(SCREAMING_SNAKE_CASE ) not in self.src_skip , SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : List[Any] = list(filter(lambda SCREAMING_SNAKE_CASE : type(SCREAMING_SNAKE_CASE ) not in self.dest_skip , SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
raise Exception(
F'Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE )} operations while'
F' destination module has {len(SCREAMING_SNAKE_CASE )}.' )
for dest_m, src_m in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True ) -> Optional[int]:
print(f'Converting {name}...' )
with torch.no_grad():
UpperCamelCase__ : Union[str, Any] = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase ).eval()
UpperCamelCase__ : List[Any] = ResNetForImageClassification(__lowerCAmelCase ).eval()
UpperCamelCase__ : Optional[Any] = ModuleTransfer(src=__lowerCAmelCase , dest=__lowerCAmelCase )
UpperCamelCase__ : int = torch.randn((1, 3, 224, 224) )
module_transfer(__lowerCAmelCase )
assert torch.allclose(from_model(__lowerCAmelCase ) , our_model(__lowerCAmelCase ).logits ), "The model logits don't match the original one."
UpperCamelCase__ : List[Any] = f'resnet{"-".join(name.split("resnet" ) )}'
print(__lowerCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=__lowerCAmelCase , )
    # we can reuse the ConvNeXt image processor here
UpperCamelCase__ : List[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=__lowerCAmelCase , )
print(f'Pushed {checkpoint_name}' )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True ) -> Dict:
UpperCamelCase__ : Dict = "imagenet-1k-id2label.json"
UpperCamelCase__ : Optional[int] = 1000
UpperCamelCase__ : Any = (1, num_labels)
UpperCamelCase__ : Union[str, Any] = "huggingface/label-files"
UpperCamelCase__ : Optional[int] = num_labels
UpperCamelCase__ : str = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) )
UpperCamelCase__ : Optional[int] = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCamelCase__ : Tuple = idalabel
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Union[str, Any] = partial(__lowerCAmelCase , num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(__lowerCAmelCase , names_to_config[model_name] , __lowerCAmelCase , __lowerCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
        '''The name of the model you wish to convert, it must be one of the supported resnet* architectures,'''
        ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCamelCase : int =parser.parse_args()
lowerCamelCase : Path =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 189
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class __lowerCamelCase ( __UpperCamelCase ):
'''simple docstring'''
a_ : int = """data2vec-text"""
def __init__( self : Dict , a_ : Dict=3_05_22 , a_ : Any=7_68 , a_ : Dict=12 , a_ : int=12 , a_ : int=30_72 , a_ : Optional[int]="gelu" , a_ : int=0.1 , a_ : int=0.1 , a_ : List[Any]=5_12 , a_ : Any=2 , a_ : str=0.02 , a_ : Any=1e-1_2 , a_ : Optional[int]=1 , a_ : List[Any]=0 , a_ : Union[str, Any]=2 , a_ : Optional[int]="absolute" , a_ : List[str]=True , a_ : str=None , **a_ : Dict , ):
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
lowerCAmelCase_ : int = vocab_size
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : Optional[int] = num_attention_heads
lowerCAmelCase_ : int = hidden_act
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : str = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_probs_dropout_prob
lowerCAmelCase_ : Tuple = max_position_embeddings
lowerCAmelCase_ : int = type_vocab_size
lowerCAmelCase_ : Optional[Any] = initializer_range
lowerCAmelCase_ : Tuple = layer_norm_eps
lowerCAmelCase_ : Optional[int] = position_embedding_type
lowerCAmelCase_ : Any = use_cache
lowerCAmelCase_ : List[str] = classifier_dropout
class __lowerCamelCase ( __UpperCamelCase ):
'''simple docstring'''
@property
def lowerCamelCase ( self : Tuple ):
if self.task == "multiple-choice":
lowerCAmelCase_ : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase_ : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 351
|
"""simple docstring"""
import argparse
import os
import re
lowercase__ = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowercase__ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ = re.compile(r"""\[([^\]]+)\]""")
def __lowerCamelCase ( __UpperCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = _re_indent.search(__UpperCamelCase )
return "" if search is None else search.groups()[0]
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase=None , __UpperCamelCase=None ) -> str:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Dict = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(__UpperCamelCase ):
index += 1
lowerCAmelCase_ : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase_ : List[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase_ : Optional[Any] = [lines[index]]
index += 1
while index < len(__UpperCamelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__UpperCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(__UpperCamelCase ) )
if index < len(__UpperCamelCase ) - 1:
lowerCAmelCase_ : List[Any] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase_ : Any = []
else:
blocks.append("\n".join(__UpperCamelCase ) )
lowerCAmelCase_ : Any = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__UpperCamelCase ) > 0:
blocks.append("\n".join(__UpperCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__UpperCamelCase ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def __lowerCamelCase ( __UpperCamelCase ) -> Any:
"""simple docstring"""
def _inner(__UpperCamelCase ):
return key(__UpperCamelCase ).lower().replace("_" , "" )
return _inner
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=None ) -> List[str]:
"""simple docstring"""
def noop(__UpperCamelCase ):
return x
if key is None:
lowerCAmelCase_ : Optional[int] = noop
# Constants are all uppercase, they go first.
lowerCAmelCase_ : str = [obj for obj in objects if key(__UpperCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase_ : str = [obj for obj in objects if key(__UpperCamelCase )[0].isupper() and not key(__UpperCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase_ : int = [obj for obj in objects if not key(__UpperCamelCase )[0].isupper()]
lowerCAmelCase_ : Dict = ignore_underscore(__UpperCamelCase )
return sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase )
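# e.g. for the helper above (upstream name `sort_objects`): ["foo", "Bar", "BAZ"] -> ["BAZ", "Bar", "foo"]
# (constants first, then classes, then functions, each group sorted case-insensitively).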
def __lowerCamelCase ( __UpperCamelCase ) -> List[str]:
"""simple docstring"""
def _replace(__UpperCamelCase ):
lowerCAmelCase_ : Tuple = match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
lowerCAmelCase_ : Optional[int] = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ : Optional[int] = keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(__UpperCamelCase )] ) + "]"
lowerCAmelCase_ : Union[str, Any] = import_statement.split("\n" )
if len(__UpperCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase_ : Optional[int] = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase_ : Optional[Any] = [(i, _re_strip_line.search(__UpperCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase_ : List[Any] = sort_objects(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] )
lowerCAmelCase_ : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__UpperCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase_ : Dict = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase_ : Optional[Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ : Any = keys[:-1]
lowerCAmelCase_ : Dict = get_indent(lines[1] ) + ", ".join([f'''"{k}"''' for k in sort_objects(__UpperCamelCase )] )
return "\n".join(__UpperCamelCase )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase_ : List[str] = _re_bracket_content.sub(_replace , __UpperCamelCase )
        return import_statement
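# One-line example for the helper above (upstream name `sort_objects_in_import`):
#   '_import_structure["models.bert"] = ["BertModel", "BertConfig"]'
#   -> '_import_structure["models.bert"] = ["BertConfig", "BertModel"]'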
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=True ) -> Optional[int]:
"""simple docstring"""
with open(__UpperCamelCase , encoding="utf-8" ) as f:
lowerCAmelCase_ : List[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase_ : int = split_code_in_indented_blocks(
__UpperCamelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__UpperCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase_ : Optional[int] = main_blocks[block_idx]
lowerCAmelCase_ : Union[str, Any] = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase_ : str = 0
while line_idx < len(__UpperCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase_ : Optional[int] = len(__UpperCamelCase )
else:
line_idx += 1
if line_idx >= len(__UpperCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase_ : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase_ : Union[str, Any] = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCAmelCase_ : Tuple = split_code_in_indented_blocks(__UpperCamelCase , indent_level=__UpperCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase_ : List[Any] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase_ : Dict = [(pattern.search(__UpperCamelCase ).groups()[0] if pattern.search(__UpperCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase_ : Any = [(i, key) for i, key in enumerate(__UpperCamelCase ) if key is not None]
lowerCAmelCase_ : Union[str, Any] = [x[0] for x in sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : str = []
for i in range(len(__UpperCamelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase_ : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(__UpperCamelCase )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase_ : Any = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(__UpperCamelCase ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(__UpperCamelCase ) )
def __lowerCamelCase ( __UpperCamelCase=True ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Any = []
for root, _, files in os.walk(__UpperCamelCase ):
if "__init__.py" in files:
lowerCAmelCase_ : Dict = sort_imports(os.path.join(__UpperCamelCase , "__init__.py" ) , check_only=__UpperCamelCase )
if result:
lowerCAmelCase_ : Union[str, Any] = [os.path.join(__UpperCamelCase , "__init__.py" )]
if len(__UpperCamelCase ) > 0:
raise ValueError(f'''Would overwrite {len(__UpperCamelCase )} files, run `make style`.''' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowercase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 161
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
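# `_LazyModule` defers the heavy torch/TF imports: submodules listed in `_import_structure`
# are only imported the first time one of their attributes is accessed.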
| 26
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Dict , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 28
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
UpperCAmelCase : Tuple = None
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : str = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : int = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : List[str] = {
'google/rembert': 256,
}
UpperCAmelCase : List[Any] = '▁'
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer, backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by wrapping one or two sequences with [CLS] and [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 at sequence-token positions."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type ids: 0 for [CLS] + first sequence + [SEP], 1 for second sequence + [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
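# Usage sketch (hedged: assumes the public google/rembert checkpoint is reachable):
# tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
# tokenizer("Hello world")["input_ids"]  # ids come back wrapped as [CLS] ... [SEP]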
| 320
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
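# _LazyModule defers the actual import of BartphoTokenizer until first attribute access,
# keeping `import transformers` cheap even when sentencepiece is installed.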
| 320
| 1
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
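# End-to-end checkpointing test: trains bert-base-cased on GLUE/MRPC with Accelerate (optionally
# under DeepSpeed via DummyOptim/DummyScheduler), saves state every epoch, and verifies that
# accuracy, scheduler LR and optimizer LR survive a resume from checkpoint.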
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: fall back to DeepSpeed's DummyOptim when the DS config owns the optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        # Save a checkpoint and record the metrics it should reproduce on resume
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script with per-epoch checkpointing.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False)
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.")
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None,
        help="If passed, the training will stop after this number of epochs.")
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 82
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        # head and rear are sentinel nodes; real entries live between them
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Add a node just before the rear (most recently used position)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink and return the given node, or None if it is not in the list."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache that evicts the least recently used entry when capacity is reached."""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], "LRUCache[T, U]"] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for key and mark it most recently used, or None on a miss."""
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update key, evicting the least recently used entry when full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator version of LRUCache for single-argument functions."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> "LRUCache[T, U]":
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
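# Usage sketch of the decorator form (names as defined above):
# @LRUCache.decorator(100)
# def fib(num: int) -> int:
#     return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)
# fib(100); print(fib.cache_info())  # -> CacheInfo(hits=..., misses=..., capacity=100, current size=...)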
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82
| 1
|
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count hole-count values t <= t_limit that a square lamina can realise in 1..n_limit ways."""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # smallest hole that keeps the tile count within t_limit
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole width must share the parity of the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
| 350
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __lowerCamelCase :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
a = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size for training."} )
a = field(default=2 , metadata={"help": "Batch size for evaluation."} )
a = field(default=0.1 , metadata={"help": "Value of weight decay."} )
a = field(
default=1_0000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
a = field(default=2E-4 , metadata={"help": "Learning rate for training."} )
a = field(default="cosine" , metadata={"help": "Learning rate scheduler type."} )
a = field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
a = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
a = field(
default=a_ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
a = field(default=5_0000 , metadata={"help": "Maximum number of training steps."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
a = field(default=1 , metadata={"help": "Training seed."} )
a = field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
a = field(
default=a_ , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
a = field(default=a_ , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(default=a_ , metadata={"help": "Number of workers used for code evaluation."} )
a = field(
default=a_ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
a = field(
default=a_ , metadata={"help": "Sample from the language model's output distribution."} )
a = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
a = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
a = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
a = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
a = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
a = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
a = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
a = field(
default=a_ , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
a = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
a = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
a = field(
default=10_0000 , metadata={"help": "Number of files to save per JSON output file."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
a = field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
a = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
a = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
a = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
a = field(
default=a_ , metadata={"help": "If True, near-duplicate samples are removed."} )
a = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
a = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
a = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(default=20_0000 , metadata={"help": "Number of examples to train tokenizer on."} )
a = field(
default=3_2768 , metadata={"help": "Target vocabulary size of the new tokenizer."} )
a = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
a = field(default=a_ , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
a = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
a = field(default=a_ , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
a = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
a = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
a = field(default=a_ , metadata={"help": "Push saved tokenizer to the hub."} )
| 227
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    """Approximate the arc length of fnc on [x_start, x_end] with `steps` straight chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 96
|
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 96
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
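# Instantiation sketch (hedged: defaults above follow the uw-madison/mra-base-512-4 style config):
# config = MraConfig(block_per_row=4, approx_mode="full")
# assert config.model_type == "mra"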
| 362
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
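# The tests below exercise processor save/load round-trips, kwarg overrides on reload, parity
# between the wrapped tokenizer/image processor and the combined OwlViTProcessor, and the
# (batch_size * num_max_text_queries, seq_length) shaping used for nested text queries.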
@require_vision
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
__lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowerCAmelCase = {'''unk_token''': '''<unk>'''}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
__lowerCAmelCase = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowercase , __lowercase )
def _snake_case (self , **__lowercase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase )
def _snake_case (self , **__lowercase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase )
def _snake_case (self , **__lowercase ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self ):
shutil.rmtree(self.tmpdirname )
def _snake_case (self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase )
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowercase )
self.assertIsInstance(processor_fast.tokenizer , __lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowercase )
self.assertIsInstance(processor_fast.image_processor , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' )
__lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = processor(text=__lowercase , return_tensors='''np''' )
__lowerCAmelCase = tokenizer(__lowercase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def _snake_case (self ):
model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name)
input_texts = ["cat", "nasa badge"]
inputs = processor(text=input_texts)
seq_length = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def _snake_case (self ):
model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name)
input_texts = [["cat", "nasa badge"], ["person"]]
inputs = processor(text=input_texts)
seq_length = 16
batch_size = len(input_texts)
num_max_text_queries = max([len(texts) for texts in input_texts])
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def _snake_case (self ):
model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name)
input_texts = ["cat", "nasa badge"]
inputs = processor(text=input_texts)
seq_length = 16
input_ids = inputs["input_ids"]
predicted_ids = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(images=__lowercase , query_images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def _snake_case (self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__lowercase )
__lowerCAmelCase = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
| 9
| 0
|
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 41
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
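# Same structure as the OwlViT processor suite above, for VisionTextDualEncoderProcessor:
# round-trips through save_pretrained/from_pretrained and parity checks against the wrapped
# BertTokenizer(Fast) and ViTImageProcessor.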
@require_tokenizers
@require_vision
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__ : Tuple = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str , **UpperCamelCase__: List[str] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: int , **UpperCamelCase__: Tuple ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
lowerCamelCase__ : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Any = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
lowerCamelCase__ : Optional[Any] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = """lower newer"""
lowerCamelCase__ : Union[str, Any] = processor(text=UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Any = """lower newer"""
lowerCamelCase__ : Dict = self.prepare_image_inputs()
lowerCamelCase__ : Tuple = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase__ ):
processor()
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[str] = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = """lower newer"""
lowerCamelCase__ : str = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 41
| 1
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path) -> None:
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase : Tuple = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase : str = ap.parse_args()
_UpperCAmelCase : Optional[int] = Path(args.readme_filepath)
_UpperCAmelCase : Union[str, Any] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 45
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
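# Loading sketch (hedged: standard datasets usage; the directory layout is assumed):
# from datasets import load_dataset
# ds = load_dataset("audiofolder", data_dir="path/to/audio_dir")  # labels inferred from subfolders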
| 45
| 1
|
'''simple docstring'''
from manim import *
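# Manim scene sketching sharded checkpoint loading: CPU/GPU/model memory blocks, a "Loaded
# Checkpoint" group and a color-coded key, then per-block animations moving checkpoint weights
# into CPU slots while the empty-model slots fill in.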
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
def __lowercase ( self : int ) -> Tuple:
mem = Rectangle(height=0.5 , width=0.5 )
fill = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : str = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
lowerCAmelCase_ : Tuple = VGroup(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
lowerCAmelCase_ : int = Text("""CPU""" , font_size=24 )
lowerCAmelCase_ : int = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Tuple = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
lowerCAmelCase_ : List[Any] = Text("""GPU""" , font_size=24 )
lowerCAmelCase_ : Optional[Any] = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase )
lowerCAmelCase_ : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
lowerCAmelCase_ : Optional[Any] = Text("""Model""" , font_size=24 )
lowerCAmelCase_ : Optional[int] = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , buff=0.5 , aligned_edge=lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase )
lowerCAmelCase_ : Tuple = []
for i, rect in enumerate(lowerCamelCase ):
rect.set_stroke(lowerCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCAmelCase_ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase , buff=0.0 )
self.add(lowerCamelCase )
cpu_targs.append(lowerCamelCase )
lowerCAmelCase_ : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Dict = VGroup(*lowerCamelCase ).arrange(lowerCamelCase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = Text("""Loaded Checkpoint""" , font_size=24 )
lowerCAmelCase_ : Union[str, Any] = Group(lowerCamelCase , lowerCamelCase ).arrange(lowerCamelCase , aligned_edge=lowerCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowerCAmelCase_ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : int = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : int = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowerCAmelCase_ : str = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase ) , Write(lowerCamelCase ) )
self.play(Write(lowerCamelCase , run_time=1 ) , Create(lowerCamelCase , run_time=1 ) )
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : Tuple = []
for i, rect in enumerate(lowerCamelCase ):
lowerCAmelCase_ : Union[str, Any] = fill.copy().set_fill(lowerCamelCase , opacity=0.7 )
target.move_to(lowerCamelCase )
first_animations.append(GrowFromCenter(lowerCamelCase , run_time=1 ) )
lowerCAmelCase_ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCamelCase , run_time=1.5 ) )
self.play(*lowerCamelCase )
self.play(*lowerCamelCase )
self.wait()
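# Rendering note (a hedged sketch; the file name stage_3.py and the scene name are
# assumptions): with the manim CLI installed, `manim -pql stage_3.py Stage3` renders
# a low-quality preview of this animation.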
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        # a multiple of 15 is already a multiple of 3, so no separate branch is needed
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
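# Cross-check via inclusion-exclusion (a sketch, not part of the original solution):
# the sum of multiples of k below n is k * m * (m + 1) // 2 with m = (n - 1) // k,
# so solution(1000) == S(3) + S(5) - S(15) == 233168.
def _sum_of_multiples(k: int, n: int = 1000) -> int:
    m = (n - 1) // k
    return k * m * (m + 1) // 2


assert _sum_of_multiples(3) + _sum_of_multiples(5) - _sum_of_multiples(15) == 233168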
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
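# Hedged usage sketch: bracket any workload with start_measure()/end_measure() and
# print the deltas; `run_workload` is a hypothetical placeholder, not a real helper.
#
#     start_measures = start_measure()
#     run_workload()
#     measures = end_measure(start_measures)
#     log_measures(measures, "workload")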
"""simple docstring"""
import pytest
UpperCAmelCase ="__dummy_dataset1__"
UpperCAmelCase ="\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def _A ( ):
"""simple docstring"""
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _A ( ):
"""simple docstring"""
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _A ( _a : str , _a : List[Any] , _a : List[Any] ):
"""simple docstring"""
A = dataset_loading_script_name
A = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=_a )
A = script_dir / f'{script_name}.py'
with open(_a , """w""" ) as f:
f.write(_a )
return str(_a )
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law: given any two of voltage, current, and resistance (pass the
    unknown one as 0), return the name/value pair of the zero value.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
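# Worked example (a sketch, consistent with the function above): with voltage=10 V
# and resistance=5 ohm, the missing current is 10 / 5 = 2 A, i.e.
# ohms_law(voltage=10, current=0, resistance=5) == {"current": 2.0}.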
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure qubit 0 into classical bit 0 and return the counts over 1000 shots."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
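# Expected behavior (hedged): no gates are applied before measurement, so the qubit
# stays in |0> and the counts should look like {"0": 1000} for 1000 shots.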
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
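# Design note: at import time the module is swapped for a _LazyModule, so the heavy
# torch-dependent symbols above are only imported on first attribute access; under
# TYPE_CHECKING the real imports run so static analyzers still see them.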
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory function used to instantiate the command from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.")
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.")
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
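# Hedged usage sketch: once registered on the datasets CLI, a conversion is invoked
# roughly as below (paths are illustrative assumptions):
#
#     datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py --datasets_directory ./hf_datasets/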
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        sn1 = {}
        sn2 = []
        sn3 = 1
        sn4 = [1, 2]
        sn5 = {"a": 1, "b": 2}
        sn6 = {"a": [1, 2], "b": [3, 4]}
        sn7 = {"a": {"1": 1}, "b": 2}
        sn8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_sn1 = {}
        expected_map_nested_sn2 = []
        expected_map_nested_sn3 = 2
        expected_map_nested_sn4 = [2, 3]
        expected_map_nested_sn5 = {"a": 2, "b": 3}
        expected_map_nested_sn6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_sn7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_sn8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, sn1), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8), expected_map_nested_sn8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, sn1, num_proc=num_proc), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2, num_proc=num_proc), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3, num_proc=num_proc), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4, num_proc=num_proc), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5, num_proc=num_proc), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6, num_proc=num_proc), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7, num_proc=num_proc), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8, num_proc=num_proc), expected_map_nested_sn8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('''input_data''' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    # check just using 2 processes from the standard library
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """
    Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
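# Worked example (a sketch): with L = 10 mH and C = 100 uF,
# f = 1 / (2 * pi * sqrt(0.01 * 0.0001)) ~= 159.15 Hz, so
# resonant_frequency(0.01, 0.0001) returns ("Resonant frequency", 159.154...).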
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy")
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy")
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation pipeline: predicts binary segmentation masks for an image."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1, ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None")

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask

        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}])

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ], )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]])

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ], )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs), [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ], )

    @require_torch
    def test_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch.device("cpu"), )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}], )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs), [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N], )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), {"label": ANY(str), "score": ANY(float)}, )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}], )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 194
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n ):
    # Mobius function: 0 for numbers with a squared prime factor, otherwise
    # (-1) ** (number of prime factors).
    factors = prime_factors(n )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
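# Quick sanity checks (assuming the `maths` helpers behave as their names
# suggest):
#   mobius(24) == 0    # 24 = 2**3 * 3 is not square-free
#   mobius(10) == 1    # 10 = 2 * 5 has an even number of prime factors
#   mobius(7) == -1    # 7 has an odd number (one) of prime factors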
if __name__ == "__main__":
import doctest
doctest.testmod()
| 194
| 1
|
"""simple docstring"""
from jiwer import compute_measures
import datasets
__a = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
__a = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
__a = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute(self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
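        # Worked example for the iterative branch, using the inputs from the
        # docstring above: jiwer's alignment of "there is an other sample" vs
        # "there is another one" yields 2 substitutions, 1 insertion and 2 hits;
        # "this is the prediction" vs "this is the reference" yields 1
        # substitution and 3 hits. Hence incorrect = 3 + 1 = 4, total = 4 + 4 = 8
        # and WER = 0.5, matching the usage example shown above.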
| 66
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name , num_frames ):
    '''simple docstring'''
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("""patch""" )
    patch_size = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key(name ):
    '''simple docstring'''
    if name == "token_embedding.weight":
        name = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" )
    if name == "positional_embedding":
        name = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""", """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""", """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""", """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""", """fc2""" )
    if name.startswith("""transformer.resblocks""" ):
        name = name.replace("""transformer.resblocks""", """text_model.encoder.layers""" )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("""attn.out_proj""", """self_attn.out_proj""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""", """text_model.final_layer_norm""" )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" )
    if name == "visual.positional_embedding":
        name = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" )
    if name.startswith("""visual.transformer.resblocks""" ):
        name = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" )
    if "visual.conv1" in name:
        name = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" )
    if "visual.ln_pre" in name:
        name = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" )
    if "visual.ln_post" in name:
        name = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" )
    if "visual.proj" in name:
        name = name.replace("""visual.proj""", """visual_projection.weight""" )
    if "text_projection" in name:
        name = name.replace("""text_projection""", """text_projection.weight""" )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" )
    if "prompts_visual_ln" in name:
        name = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("""positional""", """position""" )
    if name.startswith("""mit.resblocks""" ):
        name = name.replace("""mit.resblocks""", """mit.encoder.layers""" )
    # prompts generator
    if name.startswith("""prompts_generator.norm""" ):
        name = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" )
    return name
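# Example of the mapping implemented above (derived from the replacement rules,
# not from an actual checkpoint):
#   rename_key("visual.transformer.resblocks.0.ln_1.weight")
#   -> "vision_model.encoder.layers.0.layer_norm1.weight"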
def convert_state_dict(orig_state_dict , config ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ :Dict = orig_state_dict.pop(_lowercase )
if "attn.in_proj" in key:
snake_case_ :Optional[Any] = key.split(""".""" )
if key.startswith("""visual""" ):
snake_case_ :Any = key_split[3]
snake_case_ :Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ :str = val[
:dim, :
]
snake_case_ :Optional[int] = val[
dim : dim * 2, :
]
snake_case_ :Union[str, Any] = val[
-dim:, :
]
else:
snake_case_ :Dict = val[
:dim
]
snake_case_ :Optional[int] = val[
dim : dim * 2
]
snake_case_ :Optional[int] = val[
-dim:
]
else:
if "weight" in key:
snake_case_ :Optional[Any] = val[
:dim, :
]
snake_case_ :List[str] = val[
dim : dim * 2, :
]
snake_case_ :Dict = val[
-dim:, :
]
else:
snake_case_ :Union[str, Any] = val[:dim]
snake_case_ :Union[str, Any] = val[
dim : dim * 2
]
snake_case_ :Union[str, Any] = val[-dim:]
elif key.startswith("""mit""" ):
snake_case_ :Tuple = key_split[2]
snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ :Optional[int] = val[:dim, :]
snake_case_ :Optional[int] = val[dim : dim * 2, :]
snake_case_ :str = val[-dim:, :]
else:
snake_case_ :str = val[:dim]
snake_case_ :Any = val[dim : dim * 2]
snake_case_ :int = val[-dim:]
else:
snake_case_ :Tuple = key_split[2]
snake_case_ :Any = config.text_config.hidden_size
if "weight" in key:
snake_case_ :Dict = val[:dim, :]
snake_case_ :Dict = val[
dim : dim * 2, :
]
snake_case_ :List[str] = val[-dim:, :]
else:
snake_case_ :Any = val[:dim]
snake_case_ :Tuple = val[
dim : dim * 2
]
snake_case_ :List[str] = val[-dim:]
else:
snake_case_ :Optional[int] = rename_key(_lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case_ :Optional[Any] = val.T
snake_case_ :Tuple = val
return orig_state_dict
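# Note on the slicing above: the original checkpoint stores the query/key/value
# projections as one fused `attn.in_proj` tensor of shape (3 * dim, dim)
# (or (3 * dim,) for the bias), so the first, middle and last `dim` rows become
# the separate q/k/v projection parameters of the Hugging Face model.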
def prepare_video(num_frames ):
    '''simple docstring'''
    if num_frames == 8:
        filename = """eating_spaghetti_8_frames.npy"""
    elif num_frames == 16:
        filename = """eating_spaghetti.npy"""
    elif num_frames == 32:
        filename = """eating_spaghetti_32_frames.npy"""
    file_path = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""", filename=filename, repo_type="""dataset""", )
    video = np.load(file_path )
    return list(video )
def convert_xclip_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
'''simple docstring'''
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames )
    model = XCLIPModel(config )
    model.eval()
    if "drive" in checkpoint_url:
        output = """pytorch_model.bin"""
        gdown.cached_download(checkpoint_url, output, quiet=False )
        state_dict = torch.load(output, map_location="""cpu""" )["""model"""]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )["""model"""]
    state_dict = convert_state_dict(state_dict, config )
    model = XCLIPModel(config )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
    image_size = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
    image_processor = VideoMAEImageProcessor(size=image_size )
    slow_tokenizer = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=video, return_tensors="""pt""", padding=True )
    print("""Shape of pixel values:""", inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print("""Probs:""", probs )
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]] )
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]] )
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]] )
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
    else:
        raise ValueError(f"""Model name {model_name} not supported""" )
    assert torch.allclose(probs , expected_probs , atol=1e-3 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model, processor and slow tokenizer files to the hub...""" )
        model.push_to_hub(model_name, organization="""nielsr""" )
        processor.push_to_hub(model_name, organization="""nielsr""" )
        slow_tokenizer.push_to_hub(model_name, organization="""nielsr""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__a = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
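    # Example invocation (script filename and output path are placeholders):
    #   python convert_x_clip_checkpoint.py --model_name xclip-base-patch32 \
    #       --pytorch_dump_folder_path ./xclip-base-patch32-hf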
| 66
| 1
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('''~'''), '''.cache''')
CACHE_DIR = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _get_ckpt_path(model_type , use_small=False ):
    """simple docstring"""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
def _download(from_hf_hub , file_name ):
    """simple docstring"""
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_hub , filename=file_name , local_dir=CACHE_DIR )
def _load_model(ckpt_path , device , use_small=False , model_type="text" ):
    """simple docstring"""
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f'{model_type}_small' if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
        _download(model_info['repo_id'] , model_info['file_name'] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head' )
    model_args['hidden_size'] = model_args.pop('n_embd' )
    model_args['num_layers'] = model_args.pop('n_layer' )
    model_config = ConfigClass(**checkpoint['model_args'] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(extra_keys ) != 0:
        raise ValueError(f'extra keys found: {extra_keys}' )
    if len(missing_keys ) != 0:
        raise ValueError(f'missing keys: {missing_keys}' )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f'model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss' )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path , use_small=False , model_type="text" ):
    """simple docstring"""
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu' # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , 'cpu' , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError('initial and new outputs are not equal' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model(semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    """simple docstring"""
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , 'config.json' ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , 'config.json' ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , 'config.json' ) )
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowerCAmelCase :Union[str, Any] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
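    # Example invocation (script filename and dump folder are placeholders):
    #   python convert_suno_to_hf.py text ./bark-text-hf --is_small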
| 364
|
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self ):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self , tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
    def test_rust_and_python_full_tokenizers(self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 275
| 0
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers , dest_layers , layers_to_copy ) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F"""{len(dest_layers )} != {len(layers_to_copy )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student , n_teacher ):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
                F""" {n_student}""" )
        return list(range(n_student ) )
def get_layers_to_supervise(n_student , n_teacher ) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
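# Examples, read directly off the tables above:
#   pick_layers_to_copy(n_student=3, n_teacher=12)     -> [0, 6, 11]
#   get_layers_to_supervise(n_student=3, n_teacher=12) -> [3, 7, 11]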
def _SCREAMING_SNAKE_CASE ( a , a = "student" , a = None , a = None , a=False , a=None , a=None , **a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__A : List[str] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(a , a ):
AutoTokenizer.from_pretrained(a ).save_pretrained(a ) # purely for convenience
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(a ).eval()
else:
assert isinstance(a , a ), F"""teacher must be a model or string got type {type(a )}"""
__A : int = teacher.config.to_diff_dict()
try:
__A , __A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__A : str = teacher_e
if d is None:
__A : List[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__A , __A : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__A , __A : Optional[int] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__A : int = teacher_e
if d is None:
__A : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a )
# Copy weights
__A : Dict = teacher.config_class(**a )
__A : int = AutoModelForSeqaSeqLM.from_config(a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__A : Any = student.load_state_dict(teacher.state_dict() , strict=a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__A , __A : Optional[int] = list(range(a ) ), list(range(a ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
if d_layers_to_copy is None:
__A : List[int] = pick_layers_to_copy(a , a )
try:
if hasattr(
a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a )
copy_layers(teacher.decoder.block , student.decoder.block , a )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__A : Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
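    # Example invocation via `fire` (teacher checkpoint, filename and save path
    # are placeholders; flag names mirror the function parameters above):
    #   python make_student.py --teacher facebook/bart-large-cnn --save_path student_12_3 --e 12 --d 3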
| 280
|
def bubble_sort(list_data , length = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
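# Example: bubble_sort([0, 5, 2, 3, 2]) returns [0, 2, 2, 3, 5]; an already
# sorted list comes back after a single pass, since no swap occurs.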
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_fnet"""] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
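# Usage note: with this layout `import transformers.models.fnet` stays cheap;
# `_LazyModule` only imports a heavy submodule such as `modeling_fnet` the
# first time one of its attributes (e.g. `FNetModel`) is actually accessed.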
| 100
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool( PipelineTool ):
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']
def __init__( self , *__a , **__a) -> Any:
'''simple docstring'''
requires_backends(self , ['''vision'''])
super().__init__(*__a , **__a)
    def encode(self , image , label):
        '''simple docstring'''
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''')
    def forward(self , inputs):
        '''simple docstring'''
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits
    def decode(self , outputs):
        '''simple docstring'''
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
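# Usage sketch (image path and label are placeholders; `__call__` is inherited
# from PipelineTool and chains encode -> forward -> decode):
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cats.png"), label="cat")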
| 100
| 1
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_2_8,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 5_0,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 1_0,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 1_0,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass(cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass(cls ):
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
    def test_push_to_hub(self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("test-config" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"{USER}/test-config" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-config" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id="test-config" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"{USER}/test-config" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_in_organization(self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="valid_org/test-config-org" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_dynamic_config(self ):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
        config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
        self.assertEqual(new_config.attribute , 42 )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_config_from_string(self ):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1 # int
        resid_pdrop = c.resid_pdrop + 1.0 # float
        scale_attn_weights = not c.scale_attn_weights # bool
        summary_type = c.summary_type + "foo" # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
        self.assertEqual(n_embd , c.n_embd , "mismatch for key: n_embd" )
        self.assertEqual(resid_pdrop , c.resid_pdrop , "mismatch for key: resid_pdrop" )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
        self.assertEqual(summary_type , c.summary_type , "mismatch for key: summary_type" )
    def test_config_common_kwargs_is_complete(self ):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults )}." )
    def test_from_pretrained_subfolder(self ):
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
        self.assertIsNotNone(config )
    def test_cached_files_are_used_when_internet_is_down(self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self ):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
    def test_local_versioning(self ):
        configuration = AutoConfig.from_pretrained("bert-base-cased" )
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , "config.4.0.0.json" ) , "w" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , "config.4.0.0.json" ) , os.path.join(tmp_dir , "config.42.0.0.json" ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 768 )
    def test_repo_versioning_before(self ):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size , 768 )
| 57
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n    title={Measuring Mathematical Problem Solving With the MATH Dataset},\n    author={Dan Hendrycks\n    and Collin Burns\n    and Saurav Kadavath\n    and Akul Arora\n    and Steven Basart\n    and Eric Tang\n    and Dawn Song\n    and Jacob Steinhardt},\n    journal={arXiv preprint arXiv:2103.03874},\n    year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(j , i ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 9
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCamelCase ( PretrainedConfig ):
    model_type = """unispeech-sat"""
    def __init__(self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , num_clusters=504 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
def __A ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
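# A brief usage sketch, assuming `UniSpeechSatConfig` as reconstructed above is
# importable. `inputs_to_logits_ratio` multiplies the convolutional strides,
# i.e. the number of raw audio samples that collapse into one encoder frame:
#
#     config = UniSpeechSatConfig()
#     print(config.num_feat_extract_layers)  # 7 feature-extractor conv layers
#     print(config.inputs_to_logits_ratio)   # 5 * 2**6 = 320 samples per frame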
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
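# A short sketch of the `rope_scaling` validation above, assuming
# `GPTNeoXConfig` as reconstructed here is importable; only the `linear` and
# `dynamic` types with a float factor > 1 pass:
#
#     # Valid: requests linear RoPE interpolation with a 2x context factor.
#     config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
#
#     # Invalid type: raises ValueError at construction time.
#     try:
#         GPTNeoXConfig(rope_scaling={"type": "ntk", "factor": 2.0})
#     except ValueError as err:
#         print(err)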