| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81 to 54k | int64 0 to 721 | stringlengths 91 to 41.9k | int64 0 to 699 | int64 0 to 1 |
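The rows below are flattened dataset records: `code` and `style_context` each hold a Python source snippet, the two `*_codestyle` columns are integer style identifiers, and `label` is the binary target. As a minimal sketch of reading records with this schema via the `datasets` library (the repo id below is a placeholder; the actual dataset path is not given above):

from datasets import load_dataset

# Hypothetical repo id; substitute the real dataset path.
ds = load_dataset("username/code-style-pairs", split="train")

for row in ds.select(range(2)):
    # Each record pairs two snippets with their style ids and a 0/1 label.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:100])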
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = (CMStochasticIterativeScheduler,)
a__ = 1_0
def A__ ( self , **__snake_case):
_UpperCamelCase : str = {
'num_train_timesteps': 2_01,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
config.update(**__snake_case)
return config
def A__ ( self):
_UpperCamelCase : List[Any] = 10
_UpperCamelCase : Any = self.get_scheduler_config()
_UpperCamelCase : str = self.scheduler_classes[0](**__snake_case)
scheduler.set_timesteps(__snake_case)
_UpperCamelCase : Optional[int] = scheduler.timesteps[0]
_UpperCamelCase : Union[str, Any] = scheduler.timesteps[1]
_UpperCamelCase : Union[str, Any] = self.dummy_sample
_UpperCamelCase : Tuple = 0.1 * sample
output_0 = scheduler.step(__snake_case , __snake_case , __snake_case).prev_sample
output_1 = scheduler.step(__snake_case , __snake_case , __snake_case).prev_sample
self.assertEqual(output_0.shape , sample.shape)
self.assertEqual(output_0.shape , output_1.shape)
def A__ ( self):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__snake_case)
def A__ ( self):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.scheduler_classes[0]
_UpperCamelCase : Dict = self.get_scheduler_config()
_UpperCamelCase : Union[str, Any] = scheduler_class(**__snake_case)
_UpperCamelCase : Dict = 1
scheduler.set_timesteps(__snake_case)
_UpperCamelCase : Union[str, Any] = scheduler.timesteps
_UpperCamelCase : List[Any] = torch.manual_seed(0)
_UpperCamelCase : str = self.dummy_model()
_UpperCamelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(__snake_case):
# 1. scale model input
_UpperCamelCase : Union[str, Any] = scheduler.scale_model_input(__snake_case , __snake_case)
# 2. predict noise residual
_UpperCamelCase : Optional[Any] = model(__snake_case , __snake_case)
# 3. predict previous sample x_t-1
_UpperCamelCase : Tuple = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case).prev_sample
_UpperCamelCase : str = pred_prev_sample
_UpperCamelCase : Any = torch.sum(torch.abs(__snake_case))
_UpperCamelCase : str = torch.mean(torch.abs(__snake_case))
assert abs(result_sum.item() - 1_9_2.7_6_1_4) < 1e-2
assert abs(result_mean.item() - 0.2_5_1_0) < 1e-3
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCamelCase : Tuple = self.get_scheduler_config()
_UpperCamelCase : Union[str, Any] = scheduler_class(**__snake_case)
_UpperCamelCase : int = [1_06, 0]
scheduler.set_timesteps(timesteps=__snake_case)
_UpperCamelCase : str = scheduler.timesteps
_UpperCamelCase : Tuple = torch.manual_seed(0)
_UpperCamelCase : Tuple = self.dummy_model()
_UpperCamelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_UpperCamelCase : Optional[Any] = scheduler.scale_model_input(__snake_case , __snake_case)
# 2. predict noise residual
_UpperCamelCase : Union[str, Any] = model(__snake_case , __snake_case)
# 3. predict previous sample x_t-1
_UpperCamelCase : Optional[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case).prev_sample
_UpperCamelCase : Optional[int] = pred_prev_sample
_UpperCamelCase : int = torch.sum(torch.abs(__snake_case))
_UpperCamelCase : str = torch.mean(torch.abs(__snake_case))
assert abs(result_sum.item() - 3_4_7.6_3_5_7) < 1e-2
assert abs(result_mean.item() - 0.4_5_2_7) < 1e-3
def A__ ( self):
_UpperCamelCase : Dict = self.scheduler_classes[0]
_UpperCamelCase : Optional[Any] = self.get_scheduler_config()
_UpperCamelCase : Dict = scheduler_class(**__snake_case)
_UpperCamelCase : List[str] = [39, 30, 12, 15, 0]
with self.assertRaises(__snake_case , msg='`timesteps` must be in descending order.'):
scheduler.set_timesteps(timesteps=__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.scheduler_classes[0]
_UpperCamelCase : Optional[int] = self.get_scheduler_config()
_UpperCamelCase : Dict = scheduler_class(**__snake_case)
_UpperCamelCase : Tuple = [39, 30, 12, 1, 0]
_UpperCamelCase : Tuple = len(__snake_case)
with self.assertRaises(__snake_case , msg='Can only pass one of `num_inference_steps` or `timesteps`.'):
scheduler.set_timesteps(num_inference_steps=__snake_case , timesteps=__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.scheduler_classes[0]
_UpperCamelCase : Any = self.get_scheduler_config()
_UpperCamelCase : Any = scheduler_class(**__snake_case)
_UpperCamelCase : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__snake_case , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__snake_case)
| 648
|
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 648
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""ChineseCLIPFeatureExtractor"""]
lowerCAmelCase__ = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
| 648
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""MobileNetV2FeatureExtractor"""]
lowerCAmelCase__ = ["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
from .configuration_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetV2Config,
MobileNetV2OnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetV2ForImageClassification,
MobileNetV2ForSemanticSegmentation,
MobileNetV2Model,
MobileNetV2PreTrainedModel,
load_tf_weights_in_mobilenet_v2,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
| 648
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "distilbert"
a__ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self , __snake_case=3_05_22 , __snake_case=5_12 , __snake_case=False , __snake_case=6 , __snake_case=12 , __snake_case=7_68 , __snake_case=4 * 7_68 , __snake_case=0.1 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=0.0_2 , __snake_case=0.1 , __snake_case=0.2 , __snake_case=0 , **__snake_case , ):
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = max_position_embeddings
_UpperCamelCase : Dict = sinusoidal_pos_embds
_UpperCamelCase : Optional[int] = n_layers
_UpperCamelCase : Union[str, Any] = n_heads
_UpperCamelCase : Union[str, Any] = dim
_UpperCamelCase : Union[str, Any] = hidden_dim
_UpperCamelCase : Tuple = dropout
_UpperCamelCase : Union[str, Any] = attention_dropout
_UpperCamelCase : Optional[int] = activation
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : Union[str, Any] = qa_dropout
_UpperCamelCase : Union[str, Any] = seq_classif_dropout
super().__init__(**__snake_case , pad_token_id=__snake_case)
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 648
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
_UpperCamelCase : List[Any] = self.model.config
_UpperCamelCase : Optional[int] = -1
for idx, label in config.id2label.items():
if label.lower().startswith('entail'):
_UpperCamelCase : Tuple = int(__snake_case)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = outputs.logits
_UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
| 648
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowercase :
"""simple docstring"""
a__ = BlenderbotSmallConfig
a__ = {}
a__ = "gelu"
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=False , __snake_case=99 , __snake_case=32 , __snake_case=2 , __snake_case=4 , __snake_case=37 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=20 , __snake_case=2 , __snake_case=1 , __snake_case=0 , ):
_UpperCamelCase : List[str] = parent
_UpperCamelCase : List[Any] = batch_size
_UpperCamelCase : Any = seq_length
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : List[str] = use_labels
_UpperCamelCase : Dict = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : Optional[Any] = num_attention_heads
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCamelCase : int = max_position_embeddings
_UpperCamelCase : Optional[int] = eos_token_id
_UpperCamelCase : List[Any] = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
def A__ ( self):
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
_UpperCamelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1)
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase : Any = prepare_blenderbot_small_inputs_dict(__snake_case , __snake_case , __snake_case)
return config, inputs_dict
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : Tuple = TFBlenderbotSmallModel(config=__snake_case).get_decoder()
_UpperCamelCase : Dict = inputs_dict['input_ids']
_UpperCamelCase : Optional[int] = input_ids[:1, :]
_UpperCamelCase : List[Any] = inputs_dict['attention_mask'][:1, :]
_UpperCamelCase : Union[str, Any] = inputs_dict['head_mask']
_UpperCamelCase : Tuple = 1
# first forward pass
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , head_mask=__snake_case , use_cache=__snake_case)
_UpperCamelCase , _UpperCamelCase : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size)
_UpperCamelCase : Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
# append to next input_ids and
_UpperCamelCase : Any = tf.concat([input_ids, next_tokens] , axis=-1)
_UpperCamelCase : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1)
_UpperCamelCase : Union[str, Any] = model(__snake_case , attention_mask=__snake_case)[0]
_UpperCamelCase : Union[str, Any] = model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
_UpperCamelCase : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1]))
_UpperCamelCase : int = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__snake_case , __snake_case , rtol=1e-3)
def prepare_blenderbot_small_inputs_dict ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
a__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
a__ = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
a__ = True
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Tuple = TFBlenderbotSmallModelTester(self)
_UpperCamelCase : Any = ConfigTester(self , config_class=__snake_case)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__snake_case)
@require_tokenizers
@require_tf
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
a__ = "facebook/blenderbot_small-90M"
@cached_property
def A__ ( self):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
@cached_property
def A__ ( self):
_UpperCamelCase : str = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
return model
@slow
def A__ ( self):
_UpperCamelCase : Optional[int] = self.tokenizer(self.src_text , return_tensors='tf')
_UpperCamelCase : List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__snake_case , )
_UpperCamelCase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__snake_case)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 648
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
| 1
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key ( k : str ) -> str:
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
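# A worked example of the key mapping (hand-traced through PATTERNS above):
# "/" -> ".", then "ffn.dense." -> "fc1.", then "kernel" -> "weight", so
# rename_state_dict_key("encoder/ffn.dense.kernel") returns "encoder.fc1.weight".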
def convert_pegasus ( tf_weights : dict , cfg_updates : dict ) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy ( path : str = "./ckpt/aeslc/model.ckpt-32000" ) -> dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch ( ckpt_path : str , save_dir : str ) -> None:
    '''simple docstring'''
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
    parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 648
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 1
|
from __future__ import annotations
def allocation_num ( number_of_bytes : int , partitions : int ) -> list[str]:
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!' )
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
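A short usage sketch of the byte-allocation helper above; the last partition absorbs any remainder:

print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']
print(allocation_num(10, 3))   # ['1-3', '4-6', '7-10']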
| 648
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = Speech2TextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
| 648
| 1
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook ( method ):
    '''simple docstring'''
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('0.17.0' ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
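A hedged usage sketch of the decorator above: on a plain module with no accelerate hook attached, the wrapper falls through to the original method; the tiny class here is illustrative only.

import torch

class TinyBlock(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    # With accelerate >= 0.17.0 installed, the decorator calls
    # self._hf_hook.pre_forward(self) first whenever an offload hook
    # has been attached; otherwise forward runs unchanged.
    @apply_forward_hook
    def forward(self, x):
        return self.linear(x)

out = TinyBlock()(torch.randn(2, 4))  # shape (2, 4)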
| 648
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
| 648
| 1
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=30 , __snake_case=2 , __snake_case=3 , __snake_case=True , __snake_case=True , __snake_case=32 , __snake_case=2 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=10 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=None , ):
_UpperCamelCase : Tuple = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : int = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Dict = is_training
_UpperCamelCase : Optional[Any] = use_labels
_UpperCamelCase : int = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = type_sequence_label_size
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : Optional[Any] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[Any] = (image_size // patch_size) ** 2
_UpperCamelCase : Any = num_patches + 1
def A__ ( self):
_UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : List[Any] = None
if self.use_labels:
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Any = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = TFViTModel(config=__snake_case)
_UpperCamelCase : int = model(__snake_case , training=__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# Test with an image with different size than the one specified in config.
_UpperCamelCase : Any = self.image_size // 2
_UpperCamelCase : List[str] = pixel_values[:, :, :image_size, :image_size]
_UpperCamelCase : Tuple = model(__snake_case , interpolate_pos_encoding=__snake_case , training=__snake_case)
_UpperCamelCase : Any = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = self.type_sequence_label_size
_UpperCamelCase : int = TFViTForImageClassification(__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case , labels=__snake_case , training=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# Test with an image with different size than the one specified in config.
_UpperCamelCase : Dict = self.image_size // 2
_UpperCamelCase : List[str] = pixel_values[:, :, :image_size, :image_size]
_UpperCamelCase : List[str] = model(__snake_case , interpolate_pos_encoding=__snake_case , training=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : List[str] = TFViTForImageClassification(__snake_case)
_UpperCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase : List[Any] = model(__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A__ ( self):
_UpperCamelCase : Any = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
a__ = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = TFViTModelTester(self)
_UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='ViT does not use inputs_embeds')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
_UpperCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , tf.keras.layers.Layer))
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__snake_case)
_UpperCamelCase : Optional[int] = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Union[str, Any] = [*signature.parameters.keys()]
_UpperCamelCase : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
_UpperCamelCase : Optional[int] = TFViTModel.from_pretrained('google/vit-base-patch16-224')
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def A__ ( self):
_UpperCamelCase : Dict = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
_UpperCamelCase : Dict = self.default_image_processor
_UpperCamelCase : List[Any] = prepare_img()
_UpperCamelCase : List[str] = image_processor(images=__snake_case , return_tensors='tf')
# forward pass
_UpperCamelCase : Optional[Any] = model(**__snake_case)
# verify the logits
_UpperCamelCase : int = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Dict = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6])
tf.debugging.assert_near(outputs.logits[0, :3] , __snake_case , atol=1e-4)
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
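        # Both runs share the seed, data, and noise schedule, so adding noise via the
        # DDPM scheduler vs. the DDIM scheduler should leave the trained models
        # numerically indistinguishable, which the allclose checks above assert.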
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
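        # Illustrative id round-trip (actual ids depend on the SentencePiece model):
        #   "<s>" -> 0 via the fairseq table above, while a regular piece such as ","
        #   maps to self.sp_model.PieceToId(",") + self.fairseq_offset, e.g. 3 + 1 == 4.
        #   "<mask>" is appended last, at len(self.sp_model) + self.fairseq_offset.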
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
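        # Resulting formats (standard XLM-R convention):
        #   single sequence:   <s> X </s>
        #   pair of sequences: <s> A </s></s> B </s>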
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
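        # e.g. for a pair of sequences with lengths 2 and 3 this yields
        # [1, 0, 0, 1, 1, 0, 0, 0, 1], where 1 marks a special token.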
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
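        # XLM-RoBERTa does not use token type ids, so the returned mask is all zeros
        # even when a second sequence is supplied.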
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
_UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : str = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def A__ ( self , __snake_case):
        # SentencePiece marks word boundaries with "▁"; map it back to plain spaces.
        _UpperCamelCase : Optional[int] = ''.join(__snake_case).replace('▁' , ' ').strip()
return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
    if isinstance(UpperCAmelCase_ , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
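# Usage sketch for the argparse boolean parser above (invoked as `strabool` at the
# bottom of this file); values follow the branches above:
#   strabool("yes") -> True, strabool("0") -> False, strabool(True) -> True,
#   strabool("maybe") -> raises argparse.ArgumentTypeError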
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
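# Rough key mapping performed above (target names follow diffusers' ResnetBlock2D
# convention; stated as an assumption, since the obfuscated assignments hide them):
#   {old_prefix}.in_layers.0.*  -> {new_prefix}.norm1.*
#   {old_prefix}.in_layers.2.*  -> {new_prefix}.conv1.*
#   {old_prefix}.emb_layers.1.* -> {new_prefix}.time_emb_proj.*
#   {old_prefix}.out_layers.0.* -> {new_prefix}.norm2.*
#   {old_prefix}.out_layers.3.* -> {new_prefix}.conv2.*
#   {old_prefix}.skip_connection.* -> {new_prefix}.conv_shortcut.* (when has_skip)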
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
                _UpperCamelCase : Any = j == 0 and downsample_block_has_skip
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
                _UpperCamelCase : int = j == 0 and downsample_block_has_skip
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
    # the mid-block is hard-coded for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase__ = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def lowerCamelCase_ ( UpperCAmelCase_ : list ) -> list:
'''simple docstring'''
if len(UpperCAmelCase_ ) <= 1:
return [tuple(UpperCAmelCase_ )]
_UpperCamelCase : List[Any] = []
def generate(UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
_UpperCamelCase : Optional[int] = [0] * n
res.append(tuple(UpperCAmelCase_ ) )
_UpperCamelCase : List[Any] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[0]
else:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[c[i]]
res.append(tuple(UpperCAmelCase_ ) )
c[i] += 1
_UpperCamelCase : Tuple = 0
else:
_UpperCamelCase : Tuple = 0
i += 1
generate(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return res
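# Minimal sanity check (Heap's algorithm yields all n! permutations in-place):
#   heaps([1, 2, 3]) == [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]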
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
set_seed(7_7_0)
lowerCAmelCase__ = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
lowerCAmelCase__ = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
lowerCAmelCase__ = os.path.dirname(os.path.abspath(__file__))
lowerCAmelCase__ = os.path.join(os.path.expanduser("""~"""), """.cache""")
lowerCAmelCase__ = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple=False ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : List[Any] = model_type
if use_small:
key += "_small"
return os.path.join(UpperCAmelCase_ , REMOTE_MODEL_PATHS[key]['file_name'] )
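# Hedged sketch: for model_type="text" with use_small=True the key becomes
# "text_small", so this resolves to <download dir>/text.pt per
# REMOTE_MODEL_PATHS["text_small"]["file_name"].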
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Any:
'''simple docstring'''
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
hf_hub_download(repo_id=UpperCAmelCase_ , filename=UpperCAmelCase_ , local_dir=UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Dict="text" ) -> Optional[int]:
'''simple docstring'''
if model_type == "text":
_UpperCamelCase : Union[str, Any] = BarkSemanticModel
_UpperCamelCase : int = BarkSemanticConfig
_UpperCamelCase : str = BarkSemanticGenerationConfig
elif model_type == "coarse":
_UpperCamelCase : Union[str, Any] = BarkCoarseModel
_UpperCamelCase : List[Any] = BarkCoarseConfig
_UpperCamelCase : Optional[Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
_UpperCamelCase : Dict = BarkFineModel
_UpperCamelCase : Optional[int] = BarkFineConfig
_UpperCamelCase : Optional[Any] = BarkFineGenerationConfig
else:
raise NotImplementedError()
_UpperCamelCase : Optional[int] = F'''{model_type}_small''' if use_small else model_type
_UpperCamelCase : List[Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(UpperCAmelCase_ ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info['repo_id'] , model_info['file_name'] )
_UpperCamelCase : Tuple = torch.load(UpperCAmelCase_ , map_location=UpperCAmelCase_ )
    # hack: older checkpoints expose a single "vocab_size" that is used for both input and output
_UpperCamelCase : Dict = checkpoint['model_args']
if "input_vocab_size" not in model_args:
_UpperCamelCase : List[str] = model_args['vocab_size']
_UpperCamelCase : Any = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_UpperCamelCase : Union[str, Any] = model_args.pop('n_head' )
_UpperCamelCase : Tuple = model_args.pop('n_embd' )
_UpperCamelCase : int = model_args.pop('n_layer' )
_UpperCamelCase : str = ConfigClass(**checkpoint['model_args'] )
_UpperCamelCase : str = ModelClass(config=UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = GenerationConfigClass()
_UpperCamelCase : str = model_generation_config
_UpperCamelCase : List[Any] = checkpoint['model']
# fixup checkpoint
_UpperCamelCase : List[Any] = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(UpperCAmelCase_ ):
            # replace part of the key with the corresponding layer name in the HF implementation
_UpperCamelCase : List[Any] = k[len(UpperCAmelCase_ ) :]
for old_layer_name in new_layer_name_dict:
_UpperCamelCase : List[Any] = new_k.replace(UpperCAmelCase_ , new_layer_name_dict[old_layer_name] )
_UpperCamelCase : Union[str, Any] = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : int = set(state_dict.keys() ) - set(model.state_dict().keys() )
_UpperCamelCase : Tuple = {k for k in extra_keys if not k.endswith('.attn.bias' )}
_UpperCamelCase : Optional[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
_UpperCamelCase : str = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(UpperCAmelCase_ ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(UpperCAmelCase_ ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
_UpperCamelCase : List[Any] = model.num_parameters(exclude_embeddings=UpperCAmelCase_ )
_UpperCamelCase : List[str] = checkpoint['best_val_loss'].item()
logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(UpperCAmelCase_ , 3 )} loss''' )
model.eval()
model.to(UpperCAmelCase_ )
del checkpoint, state_dict
return model
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : int="text" ) -> Dict:
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_UpperCamelCase : Tuple = 'cpu' # do conversion on cpu
_UpperCamelCase : Dict = _get_ckpt_path(UpperCAmelCase_ , use_small=UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = _load_model(UpperCAmelCase_ , UpperCAmelCase_ , model_type=UpperCAmelCase_ , use_small=UpperCAmelCase_ )
    # load the original bark model for comparison
_UpperCamelCase : Tuple = _bark_load_model(UpperCAmelCase_ , 'cpu' , model_type=UpperCAmelCase_ , use_small=UpperCAmelCase_ )
if model_type == "text":
_UpperCamelCase : Any = bark_model['model']
if model.num_parameters(exclude_embeddings=UpperCAmelCase_ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check that the converted model produces the same output as the original bark model
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : Optional[int] = 1_0
if model_type in ["text", "coarse"]:
_UpperCamelCase : Dict = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
_UpperCamelCase : Union[str, Any] = bark_model(UpperCAmelCase_ )[0]
_UpperCamelCase : List[Any] = model(UpperCAmelCase_ )
# take last logits
_UpperCamelCase : int = output_new_model_total.logits[:, [-1], :]
else:
_UpperCamelCase : List[str] = 3
_UpperCamelCase : List[str] = 8
_UpperCamelCase : Optional[int] = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_UpperCamelCase : int = model(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : int = bark_model(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = BarkSemanticConfig.from_pretrained(os.path.join(UpperCAmelCase_ , 'config.json' ) )
_UpperCamelCase : Tuple = BarkCoarseConfig.from_pretrained(os.path.join(UpperCAmelCase_ , 'config.json' ) )
_UpperCamelCase : int = BarkFineConfig.from_pretrained(os.path.join(UpperCAmelCase_ , 'config.json' ) )
_UpperCamelCase : Optional[Any] = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
_UpperCamelCase : int = BarkSemanticModel.from_pretrained(UpperCAmelCase_ )
_UpperCamelCase : Dict = BarkCoarseModel.from_pretrained(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = BarkFineModel.from_pretrained(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = EncodecModel.from_pretrained('facebook/encodec_24khz' )
_UpperCamelCase : List[str] = BarkConfig.from_sub_model_configs(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_UpperCamelCase : Tuple = BarkModel(UpperCAmelCase_ )
_UpperCamelCase : List[str] = semantic
_UpperCamelCase : List[Any] = coarseAcoustic
_UpperCamelCase : Union[str, Any] = fineAcoustic
_UpperCamelCase : Tuple = codec
_UpperCamelCase : Dict = bark_generation_config
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
bark.save_pretrained(UpperCAmelCase_ , repo_id=UpperCAmelCase_ , push_to_hub=UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
lowerCAmelCase__ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCamelCase : List[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if k.startswith('encoder' ):
_UpperCamelCase : Optional[Any] = k.replace('.attn' , '.self_attn' )
_UpperCamelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
_UpperCamelCase : Any = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm3' , 'final_layer_norm' )
return k
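# Illustrative rename (hypothetical key, applying PATTERNS then the encoder fix-ups):
#   "encoder.attention.q_lin.weight" -> "encoder.self_attn.q_proj.weight"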
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
_UpperCamelCase : Optional[int] = sd.pop(UpperCAmelCase_ )
_UpperCamelCase : str = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
_UpperCamelCase : Tuple = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : int = model['model']
_UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = m.model.state_dict().keys()
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCamelCase : int = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase_ )
m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
m.half()
m.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
lowerCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
import torch
from diffusers import DiffusionPipeline
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case):
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case)
def __call__( self):
_UpperCamelCase : Optional[int] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
_UpperCamelCase : Any = 1
_UpperCamelCase : Optional[Any] = self.unet(__snake_case , __snake_case).sample
_UpperCamelCase : Tuple = self.scheduler.step(__snake_case , __snake_case , __snake_case).prev_sample
_UpperCamelCase : List[str] = scheduler_output - scheduler_output + torch.ones_like(__snake_case)
return result
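        # Note: scheduler_output - scheduler_output cancels exactly, so this dummy
        # pipeline always returns an all-ones tensor with the scheduler output's
        # shape; it exercises the pipeline plumbing rather than generating samples.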
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowercase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : List[Any] = tokenizer
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case)
_UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.tokenizer(__snake_case)
_UpperCamelCase : Dict = self.bert(**__snake_case)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
super().setUp()
_UpperCamelCase : Optional[Any] = [
BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ] # repeated so the loop also covers fast_bert_tokenizer=False
_UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_UpperCamelCase : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest')
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences)
_UpperCamelCase : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf.function(__snake_case)
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Optional[int] = tf.constant(__snake_case)
_UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case)
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case)
_UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences)
_UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCamelCase : int = Path(__snake_case) / 'saved.model'
model.save(__snake_case)
_UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case)
_UpperCamelCase : int = loaded_model(__snake_case)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Union[str, Any]:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(UpperCAmelCase_ ):
return ext
raise Exception(
F'''Unable to determine file format from file extension {path}. '''
F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
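# e.g. "predictions.csv" -> "csv" (one of PipelineDataFormat.SUPPORTED_FORMATS),
# while an empty path falls back to "pipe", i.e. reading from stdin.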
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Dict = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_UpperCamelCase : List[str] = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
_UpperCamelCase : Any = PipelineDataFormat.from_str(
format=UpperCAmelCase_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(UpperCAmelCase_ , UpperCAmelCase_ )
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = nlp
_UpperCamelCase : Dict = reader
@staticmethod
def A__ ( __snake_case):
_UpperCamelCase : str = parser.add_parser('run' , help='Run a pipeline through the CLI')
run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run')
run_parser.add_argument('--input' , type=__snake_case , help='Path to the file to use for inference')
run_parser.add_argument('--output' , type=__snake_case , help='Path to the file that will be used post to write results.')
run_parser.add_argument('--model' , type=__snake_case , help='Name or path to the model to instantiate.')
run_parser.add_argument('--config' , type=__snake_case , help='Name or path to the model\'s config to instantiate.')
run_parser.add_argument(
'--tokenizer' , type=__snake_case , help='Name of the tokenizer to use. (default: same as the model name)')
run_parser.add_argument(
'--column' , type=__snake_case , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
run_parser.add_argument(
'--format' , type=__snake_case , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
run_parser.add_argument(
'--device' , type=__snake_case , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.')
run_parser.set_defaults(func=__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self._nlp, []
for entry in self._reader:
_UpperCamelCase : Optional[int] = nlp(**__snake_case) if self._reader.is_multi_columns else nlp(__snake_case)
if isinstance(__snake_case , __snake_case):
outputs.append(__snake_case)
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_UpperCamelCase : Any = self._reader.save_binary(__snake_case)
logger.warning(f'''Current pipeline requires output to be in binary format, saving at {binary_path}''')
else:
self._reader.save(__snake_case)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[Any] = embeddings_size
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : Dict = depths
_UpperCamelCase : str = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Tuple = len(__snake_case)
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Union[str, Any] = out_indices
_UpperCamelCase : int = num_groups
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = BitModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = self.num_labels
_UpperCamelCase : Dict = BitForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
_UpperCamelCase : Any = None
_UpperCamelCase : str = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
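        # With out_features=None the backbone defaults to the last stage only, hence
        # a single feature map with hidden_sizes[-1] channels at 1x1 spatial resolution.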
def A__ ( self):
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Dict = BitModelTester(self)
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
@unittest.skip(reason='Bit does not output attentions')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
_UpperCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(config=__snake_case)
for name, module in model.named_modules():
if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Any = layer_type
_UpperCamelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
@unittest.skip(reason='Bit does not use feedforward chunking')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case)
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@require_torch
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = BitModelTester(self)
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    '''simple docstring'''
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ])
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
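# Hedged usage sketch (added for illustration, not part of the original script;
# running it needs network access to fetch the demo image):
#
#   demo = load_demo_image(image_size=384, device='cpu')
#   assert demo.shape == (1, 3, 384, 384)
#
# The shape follows from Resize((image_size, image_size)) plus unsqueeze(0).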
def rename_key(key):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(r'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(r'attn', 'self_attn', key)
    if "norm1" in key:
        key = re.sub(r'norm1', 'layer_norm1', key)
    if "norm2" in key:
        key = re.sub(r'norm2', 'layer_norm2', key)
    if "encoder.norm" in key:
        key = re.sub(r'encoder.norm', 'post_layernorm', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'encoder.patch_embed.proj', 'embeddings.patch_embedding', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'encoder.pos_embed', 'embeddings.position_embedding', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'encoder.cls_token', 'embeddings.class_embedding', key)
    if "self_attn" in key:
        key = re.sub(r'self_attn.proj', 'self_attn.projection', key)
    return key
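# Hedged sanity check (added for illustration, not part of the original
# script): exercises the regex-based renaming on one representative ViT key.
assert (
    rename_key('visual_encoder.blocks.0.attn.qkv.weight')
    == 'vision_model.encoder.layers.0.self_attn.qkv.weight'
)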
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    '''simple docstring'''
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='base')
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device='cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    input_ids = tokenizer(['a picture of']).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base')
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question, return_tensors='pt').input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='base')
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question, return_tensors='pt', padding='max_length', truncation=True, max_length=35).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    '''simple docstring'''
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    '''simple docstring'''
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    '''simple docstring'''
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
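# Hedged example (added for illustration): rotating the unit x-vector by 90
# degrees yields the unit y-vector, up to floating-point error.
assert numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))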
def plot(vectors: list[numpy.ndarray]) -> None:
    '''simple docstring'''
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
def bin_to_octal(bin_string: str) -> str:
    '''simple docstring'''
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
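# Hedged example (added for illustration): 0b11010 is decimal 26, which is
# octal 32; the helper left-pads to '011 010' before grouping.
assert bin_to_octal('11010') == '32'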
if __name__ == "__main__":
from doctest import testmod
testmod()
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
    '''simple docstring'''
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session, exitstatus):
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    """simple docstring"""
    def check_output( self , want , got , optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
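# Hedged usage note (added for illustration): with IGNORE_RESULT registered
# above, a doctest can opt out of output comparison on a single example, e.g.
#
#   >>> some_noisy_call()  # doctest: +IGNORE_RESULT
#
# CustomOutputChecker.check_output then returns True for that example no
# matter what it prints, while every other doctest is checked as usual.
# (`some_noisy_call` is a hypothetical stand-in, not a real helper.)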
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    '''simple docstring'''
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class lowercase :
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case):
pass
def A__ ( self):
pass
def A__ ( self):
pass
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__snake_case , __snake_case)
_UpperCamelCase : int = TFVisionTextDualEncoderModel(__snake_case)
_UpperCamelCase : str = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase , _UpperCamelCase : Optional[Any] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : List[str] = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case)
_UpperCamelCase : str = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : List[str] = {'vision_model': vision_model, 'text_model': text_model}
_UpperCamelCase : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__snake_case)
_UpperCamelCase : List[Any] = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : List[Any] = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case)
_UpperCamelCase : Optional[Any] = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
_UpperCamelCase : int = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__snake_case)
_UpperCamelCase : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(__snake_case)
_UpperCamelCase : Dict = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
_UpperCamelCase : str = after_output[0].numpy()
_UpperCamelCase : Optional[int] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__snake_case , 1e-5)
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case)
_UpperCamelCase : Tuple = model(
input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , output_attentions=__snake_case)
_UpperCamelCase : str = output.vision_model_output.attentions
self.assertEqual(len(__snake_case) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : int = to_atuple(vision_model.config.image_size)
_UpperCamelCase : int = to_atuple(vision_model.config.patch_size)
_UpperCamelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCamelCase : Union[str, Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
_UpperCamelCase : List[Any] = output.text_model_output.attentions
self.assertEqual(len(__snake_case) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = np.abs((a - b)).max()
self.assertLessEqual(__snake_case , __snake_case , f'''Difference between torch and flax is {diff} (>= {tol}).''')
def A__ ( self):
_UpperCamelCase : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
self.check_save_load(**__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__snake_case)
@slow
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[Any] = self.get_pretrained_model_and_inputs()
_UpperCamelCase : Optional[Any] = model_a(**__snake_case)
_UpperCamelCase : str = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__snake_case)
_UpperCamelCase : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(__snake_case)
_UpperCamelCase : str = model_a(**__snake_case)
_UpperCamelCase : Optional[int] = after_outputs[0].numpy()
_UpperCamelCase : Tuple = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__snake_case , 1e-5)
@require_tf
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
_UpperCamelCase : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert')
_UpperCamelCase : List[Any] = 13
_UpperCamelCase : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
_UpperCamelCase : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
_UpperCamelCase : str = random_attention_mask([batch_size, 4])
_UpperCamelCase : Any = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : Tuple = TFViTModel(__snake_case , name='vision_model')
_UpperCamelCase : Dict = TFBertModel(__snake_case , name='text_model')
return vision_model, text_model
def A__ ( self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
_UpperCamelCase : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta')
_UpperCamelCase : Dict = 13
_UpperCamelCase : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
_UpperCamelCase : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
_UpperCamelCase : Tuple = random_attention_mask([batch_size, 4])
_UpperCamelCase : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case)
_UpperCamelCase : List[str] = model(
input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , output_attentions=__snake_case)
_UpperCamelCase : str = output.vision_model_output.attentions
self.assertEqual(len(__snake_case) , vision_config.num_hidden_layers)
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCamelCase : List[str] = to_atuple(vision_model.config.image_size)
_UpperCamelCase : Union[str, Any] = to_atuple(vision_model.config.patch_size)
_UpperCamelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCamelCase : str = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
_UpperCamelCase : Tuple = output.text_model_output.attentions
self.assertEqual(len(__snake_case) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : str = TFDeiTModel(__snake_case , name='vision_model')
_UpperCamelCase : Dict = TFRobertaModel(__snake_case , name='text_model')
return vision_model, text_model
def A__ ( self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
_UpperCamelCase : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert')
_UpperCamelCase : List[Any] = 13
_UpperCamelCase : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
_UpperCamelCase : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
_UpperCamelCase : str = random_attention_mask([batch_size, 4])
_UpperCamelCase : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : Dict = TFCLIPVisionModel(__snake_case , name='vision_model')
_UpperCamelCase : Tuple = TFBertModel(__snake_case , name='text_model')
return vision_model, text_model
def A__ ( self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self):
_UpperCamelCase : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=__snake_case)
_UpperCamelCase : str = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
_UpperCamelCase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCamelCase : str = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=__snake_case , padding=__snake_case , return_tensors='np')
_UpperCamelCase : Union[str, Any] = model(**__snake_case)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCamelCase : List[Any] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __snake_case , atol=1e-3))
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i, k, i, n):
    '''simple docstring'''
    ds_b = sum(a_i[j] for j in range(k , len(a_i) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i) , k) ) )
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i) ) ):
                    new_c, a_i[j] = divmod(new_c , 10 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute(a_i, k, i, n):
    '''simple docstring'''
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s , 10 )
            ds_c += a_i[j]
        if addend > 0:
            add(a_i , k , addend )
    return diff, i - start_i
def add(digits, k, addend):
    '''simple docstring'''
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend , 10 )
        digits.append(digit )
def solution(n: int = 10**15) -> int:
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
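# Hedged cross-check (added for illustration, not part of the original file):
# a direct O(n) reference for the same recurrence a(n+1) = a(n) + digitsum(a(n)),
# only practical for small n but handy for sanity-checking solution().
def _naive_solution(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a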
if __name__ == "__main__":
print(f'{solution() = }')
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    '''simple docstring'''
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    '''simple docstring'''
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1 , vector_2 ) ) ** (1 / 2)
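# Hedged examples (added for illustration): the classic 3-4-5 right triangle,
# checked against both implementations.
assert euclidean_distance((0, 0), (3, 4)) == 5.0
assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0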
if __name__ == "__main__":
    def benchmark() -> None:
'''simple docstring'''
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0_0_0_0 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "vit_mae"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
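# Hedged usage sketch (added for illustration, not part of the original file):
#
#   cfg = ViTMAEConfig(mask_ratio=0.9)
#   cfg.model_type   # -> "vit_mae"
#   cfg.mask_ratio   # -> 0.9; all other fields keep the ViT-MAE base defaults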
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree ( Generic[T] ):
    """simple docstring"""
    def __init__( self , arr: list[T] , fnc: Callable[[T, T], T]):
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()
    def build( self):
        for p in range(self.N - 1 , 0 , -1):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1])
    def update( self , p: int , v: T):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1])
    def query( self , l: int , r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
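# Hedged illustration (added; not part of the original file): with
# st = SegmentTree([1, 10, -2, 9], min), st.query(0, 2) folds min over the
# inclusive index range 0..2 and returns -2; after st.update(2, 5) the same
# query returns 1, since node values are recomputed along the path to the root.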
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
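# Hedged example (added for illustration): a standard test case for this
# problem. A 1-day pass on day 1, a 7-day pass from day 4, and a 1-day pass
# on day 20 cost 2 + 7 + 2 = 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11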
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key(k):
    '''simple docstring'''
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith('encoder' ):
        k = k.replace('.attn' , '.self_attn' )
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'final_layer_norm' )
    elif k.startswith('decoder' ):
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'encoder_attn_layer_norm' )
        k = k.replace('norm3' , 'final_layer_norm' )
    return k
def rename_layernorm_keys(sd):
    '''simple docstring'''
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('layernorm_embedding' , 'layer_norm' )
        assert new_k not in sd
        sd[new_k] = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    '''simple docstring'''
    model = torch.load(checkpoint_path , map_location='cpu' )
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
    def A__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)
    def A__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)
    def A__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)
    def A__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)
    def A__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)
    def A__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)
    def A__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)
    def A__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
        expected = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii , jj , a , b)
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE) , msg=msg)
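# Usage sketch (not part of the original test file; assumes the gated checkpoint
# above is available): the integration test compares a 3x3 corner of the hidden
# states against hard-coded reference values within TOLERANCE.
#
#   model = MegatronBertModel.from_pretrained('nvidia/megatron-bert-uncased-345m').half()
#   output = model(_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]]))[0]
#   assert math.isclose(output[0, 0, 0].item(), -0.6_0_4_0, rel_tol=TOLERANCE, abs_tol=TOLERANCE)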
| 648
| 1
|
import torch
from transformers import AutoModel
class lowercase ( torch.nn.Module ):
"""simple docstring"""
    def __init__( self , pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3 , 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT( self , **inputs):
        return self.bert(**inputs).last_hidden_state
    def VectorSum( self , token_embeddings):
        return token_embeddings.sum(2 , keepdim=True)
    def Atten( self , q , S , T=1):
        return self.softmax(T * self.cos(q , S))
    def forward( self , W_query , W_supports):
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i] , s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
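# Usage sketch (hypothetical, not part of the original file): W_query and
# W_supports are batches produced by a matching FSNER tokenizer wrapper; besides
# the usual BERT fields, W_supports must carry 'sizes', 'start_token_id' and
# 'end_token_id' entries, which the forward pass above pops before encoding.
#
#   model = lowercase('sayef/fsner-bert-base-uncased')
#   p_starts, p_ends = model(W_query, W_supports)  # per-token span start/end scores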
| 648
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def A__ ( self , token_ids_0 , token_ids_1 = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def A__ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def A__ ( self , token_ids_0 , token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
    def A__ ( self , text):
        return self.sp_model.encode(text , out_type=str)
    def A__ ( self , token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def A__ ( self , index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def A__ ( self , tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
    def A__ ( self , save_directory , filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
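# Sketch of the fairseq/spm id alignment implemented above (assumes a local
# SentencePiece model file; ids shown follow the offset logic, not real values):
#
#   tok = lowercase('sentencepiece.bpe.model')
#   tok._convert_token_to_id('<s>')     # 0, served from fairseq_tokens_to_ids
#   tok._convert_token_to_id('▁de')     # sp_model.PieceToId('▁de') + 1 (fairseq_offset)
#   tok.mask_token_id                   # len(sp_model) + fairseq_offset, the last id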
| 648
| 1
|
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds , labels):
    '''simple docstring'''
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32'),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32'),
}) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def A__ ( self , __snake_case , __snake_case):
return {"accuracy": simple_accuracy(__snake_case , __snake_case)}
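# Quick sanity check of simple_accuracy (assumes numpy arrays, as produced by the
# datasets pipeline; not part of the original metric file):
#
#   import numpy as np
#   assert simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0])) == 2 / 3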
| 648
|
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
    def __init__( self , image_processor , feature_extractor):
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , *args , mask_pixel=mask_pixel , **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , *args , is_mixed=True , **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
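# Usage sketch (hypothetical inputs; not part of the original file):
#
#   processor = lowercase(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44_100)
#   # 'batch' merges the image-processor and feature-extractor outputs into one dict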
| 648
| 1
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def convert_classification(base_model_name , hf_config , downstream_dict):
    '''simple docstring'''
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name , config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model
def convert_diarization(base_model_name , hf_config , downstream_dict):
    '''simple docstring'''
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model
def convert_xvector(base_model_name , hf_config , downstream_dict):
    '''simple docstring'''
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name , config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name , config_path , checkpoint_path , model_dump_path):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location='cpu')
    downstream_dict = checkpoint['Downstream']
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict)
    else:
        raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''')
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
lowerCAmelCase__ = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
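# Example invocation (hypothetical script name and paths; assumes an s3prl
# checkpoint on disk):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model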
| 648
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
    def __init__( self , vocab_size=5_02_77 , context_length=10_24 , hidden_size=40_96 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1e-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
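# Usage sketch (not part of the original file): unset sizes fall back to the
# defaults derived above, e.g. intermediate_size = 4 * hidden_size.
#
#   config = lowercase(vocab_size=5_02_77, hidden_size=10_24, num_hidden_layers=24)
#   assert config.attention_hidden_size == 10_24
#   assert config.intermediate_size == 4 * 10_24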
| 648
| 1
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "tokenizer"]
a__ = "FlavaImageProcessor"
a__ = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor , tokenizer)
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = False , max_length = None , stride = 0 , pad_to_multiple_of = None , return_image_mask = None , return_codebook_pixels = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
def A__ ( self , *__snake_case , **__snake_case):
return self.tokenizer.batch_decode(*__snake_case , **__snake_case)
def A__ ( self , *__snake_case , **__snake_case):
return self.tokenizer.decode(*__snake_case , **__snake_case)
@property
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def A__ ( self):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __snake_case , )
return self.image_processor_class
@property
def A__ ( self):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __snake_case , )
return self.image_processor
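# Usage sketch (hypothetical inputs; not part of the original file):
#
#   from transformers import BertTokenizerFast, FlavaImageProcessor
#   processor = lowercase(image_processor=FlavaImageProcessor(),
#                         tokenizer=BertTokenizerFast.from_pretrained('bert-base-uncased'))
#   batch = processor(text=['a photo of a cat'], images=[image], return_tensors='pt')
#   # 'batch' holds both the tokenizer fields and the pixel values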
| 648
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
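# Sketch of what the ONNX input spec above resolves to for the default task
# (not part of the original file):
#
#   onnx_config.inputs == OrderedDict([
#       ('input_ids', {0: 'batch', 1: 'sequence'}),
#       ('attention_mask', {0: 'batch', 1: 'sequence'}),
#       ('token_type_ids', {0: 'batch', 1: 'sequence'}),
#   ])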
| 648
| 1
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    '''simple docstring'''
    config = SwinConfig()
    name_split = swin_name.split('_')
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 9_6
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 1_2, 2_4)
    elif model_size == "small":
        embed_dim = 9_6
        depths = (2, 2, 1_8, 2)
        num_heads = (3, 6, 1_2, 2_4)
    elif model_size == "base":
        embed_dim = 1_2_8
        depths = (2, 2, 1_8, 2)
        num_heads = (4, 8, 1_6, 3_2)
    else:
        embed_dim = 1_9_2
        depths = (2, 2, 1_8, 2)
        num_heads = (6, 1_2, 2_4, 4_8)
    if "in22k" in swin_name:
        num_classes = 2_1_8_4_1
    else:
        num_classes = 1_0_0_0
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset') , 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn' , 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier')
    else:
        name = 'swin.' + name
    return name
def convert_state_dict(orig_state_dict , model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''
                ] = val[:dim]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name , pytorch_dump_folder_path):
    '''simple docstring'''
    timm_model = timm.create_model(swin_name , pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model)
    model.load_state_dict(new_state_dict)
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-')))
    image = Image.open(requests.get(url , stream=True).raw)
    inputs = image_processor(images=image , return_tensors='pt')
    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1e-3)
    print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCAmelCase__ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
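# Example invocation (hypothetical script name; assumes timm is installed and the
# reference COCO image is reachable):
#
#   python convert_swin_checkpoint.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224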
| 648
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
    def setup( self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
    def encode( self , text , labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
    def decode( self , outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
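# Usage sketch (not part of the original file): the tool scores the input text
# against each candidate label via NLI entailment and returns the best label.
#
#   tool = lowercase()
#   tool.setup()
#   label = tool('This is a super nice API!', labels=['positive', 'negative'])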
| 648
| 1
|
from math import ceil
def solution(n: int = 1_0_0_1) -> int:
    '''simple docstring'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
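# Worked check: for a 5x5 spiral the diagonal values are 1, 3, 5, 7, 9, 13, 17,
# 21, 25, which sum to 101, i.e. solution(5) == 101. Each ring i contributes four
# corners (2i+1)^2, (2i+1)^2 - 2i, (2i+1)^2 - 4i and (2i+1)^2 - 6i, whose total
# is 4*(2i+1)**2 - 12*i = 4 * odd**2 - 6 * even, matching the loop body above.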
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 648
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    '''simple docstring'''
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/'):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    '''simple docstring'''
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
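# Sketch of the docstring pattern the regex above matches (not part of the file):
#
#   _re_checkpoint.findall('[bert-base-uncased](https://huggingface.co/bert-base-uncased)')
#   # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]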
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 648
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("""fixtures""")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
SAMPLE_CONFIG = get_tests_dir("""fixtures/dummy-config.json""")
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def A__ ( self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def A__ ( self):
        config = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
        self.assertIsInstance(config , WavaVecaFeatureExtractor)
    def A__ ( self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config , WavaVecaFeatureExtractor)
    def A__ ( self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop('feature_extractor_type')
            config = WavaVecaFeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            feature_extractor = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)
            self.assertIsInstance(feature_extractor , WavaVecaFeatureExtractor)
    def A__ ( self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config , WavaVecaFeatureExtractor)
    def A__ ( self):
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier'):
            feature_extractor = AutoFeatureExtractor.from_pretrained('bert-base')
    def A__ ( self):
        with self.assertRaisesRegex(
            EnvironmentError , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            feature_extractor = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa')
    def A__ ( self):
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            feature_extractor = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')
    def A__ ( self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=False)
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir , trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
    def A__ ( self):
        try:
            AutoConfig.register('custom' , CustomConfig)
            AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(WavaVecaConfig , WavaVecaFeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor , CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def A__ ( self):
        class NewFeatureExtractor ( WavaVecaFeatureExtractor ):
            """simple docstring"""
            is_local = True
        try:
            AutoConfig.register('custom' , CustomConfig)
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor')
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
            self.assertTrue(not hasattr(feature_extractor , 'is_local'))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 700
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
    def A__ ( self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def A__ ( self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
    def A__ ( self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [2_89, 50, 14, 1_74, 3_86] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
cls.tokenizer : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(ES_CODE , self.tokenizer.all_special_ids)
generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
self.assertEqual(result , expected_spanish)
self.assertNotIn(self.tokenizer.eos_token , result)
def A__ ( self):
self.tokenizer.tgt_lang = 'fr'
encoded = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , FR_CODE)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
self.tokenizer.tgt_lang = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
self.tokenizer.tgt_lang = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
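# Reading of the two tests above (a summary, not new behavior): setting tgt_lang
# makes the tokenizer prepend the matching language-code id and append eos, so a
# hypothetical call looks like:
#   tokenizer.tgt_lang = 'fr'
#   ids = tokenizer("C'est trop cool").input_ids  # ids[0] == FR_CODE, ids[-1] == eos_token_id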
| 648
| 0
|
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCAmelCase__ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( _snake_case ):
"""simple docstring"""
def __init__( self , *__snake_case , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case):
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
self.quant_trainer_args = quant_trainer_args
self.calib_num = 1_28 # default number of calibration samples
def A__ ( self , __snake_case=None):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires a calib_dataset.')
_UpperCamelCase : Optional[int] = calib_dataset if calib_dataset is not None else self.calib_dataset
_UpperCamelCase : str = self._remove_unused_columns(lowerCAmelCase__ , description='Calibration')
return DataLoader(
lowerCAmelCase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowerCAmelCase__ , )
def A__ ( self , __snake_case=None):
_UpperCamelCase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset
_UpperCamelCase : Optional[int] = self.get_calib_dataloader(lowerCAmelCase__)
_UpperCamelCase : Tuple = self.model
quant_trainer.configure_model(lowerCAmelCase__ , self.quant_trainer_args , calib=lowerCAmelCase__)
model.eval()
quant_trainer.enable_calibration(lowerCAmelCase__)
logger.info('***** Running calibration *****')
logger.info(f''' Num examples = {self.calib_num}''')
logger.info(f''' Batch size = {calib_dataloader.batch_size}''')
for step, inputs in enumerate(lowerCAmelCase__):
# Prediction step
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = self.prediction_step(lowerCAmelCase__ , lowerCAmelCase__ , prediction_loss_only=lowerCAmelCase__)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowerCAmelCase__ , self.quant_trainer_args)
_UpperCamelCase : List[Any] = model
def A__ ( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case = "eval"):
_UpperCamelCase : str = self.eval_dataset if eval_dataset is None else eval_dataset
_UpperCamelCase : str = self.get_eval_dataloader(lowerCAmelCase__)
_UpperCamelCase : Dict = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCamelCase : Any = self.compute_metrics
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_UpperCamelCase : List[Any] = eval_loop(
lowerCAmelCase__ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase__ , )
finally:
_UpperCamelCase : Tuple = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_UpperCamelCase : Tuple = self.post_process_function(lowerCAmelCase__ , lowerCAmelCase__ , output.predictions)
_UpperCamelCase : Dict = self.compute_metrics(lowerCAmelCase__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
_UpperCamelCase : Optional[Any] = metrics.pop(lowerCAmelCase__)
self.log(lowerCAmelCase__)
else:
_UpperCamelCase : Tuple = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
_UpperCamelCase : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase__)
return metrics
def A__ ( self , __snake_case , __snake_case , __snake_case=None , __snake_case = "test"):
_UpperCamelCase : Optional[int] = self.get_test_dataloader(lowerCAmelCase__)
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCamelCase : str = self.compute_metrics
_UpperCamelCase : str = None
_UpperCamelCase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_UpperCamelCase : int = eval_loop(
lowerCAmelCase__ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase__ , )
finally:
_UpperCamelCase : Union[str, Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_UpperCamelCase : Union[str, Any] = self.post_process_function(lowerCAmelCase__ , lowerCAmelCase__ , output.predictions , 'predict')
_UpperCamelCase : Optional[Any] = self.compute_metrics(lowerCAmelCase__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
_UpperCamelCase : str = metrics.pop(lowerCAmelCase__)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCAmelCase__)
def A__ ( self , __snake_case="./"):
_UpperCamelCase : List[Any] = self.eval_dataset
_UpperCamelCase : Optional[int] = self.get_eval_dataloader(lowerCAmelCase__)
_UpperCamelCase : str = next(iter(lowerCAmelCase__))
# saving device - to make it consistent
_UpperCamelCase : Dict = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# convert to tuple
_UpperCamelCase : Optional[int] = tuple(v.to(lowerCAmelCase__) for k, v in batch.items())
logger.info('Converting model to be onnx compatible')
from pytorch_quantization.nn import TensorQuantizer
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.model.to(lowerCAmelCase__)
model.eval()
model.float()
_UpperCamelCase : str = model.module if hasattr(lowerCAmelCase__ , 'module') else model
quant_trainer.configure_model(lowerCAmelCase__ , self.quant_trainer_args)
_UpperCamelCase : Optional[int] = os.path.join(lowerCAmelCase__ , 'model.onnx')
logger.info(f'''exporting model to {output_model_file}''')
_UpperCamelCase : Optional[int] = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , export_params=lowerCAmelCase__ , opset_version=13 , do_constant_folding=lowerCAmelCase__ , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=lowerCAmelCase__ , )
logger.info('onnx export finished')
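# A minimal usage sketch (hypothetical: it assumes the original, un-obfuscated
# method names calibrate/evaluate/save_onnx from the upstream quantization QA
# example, which this trainer subclass was derived from):
#   trainer = QuestionAnsweringTrainer(model=model, args=training_args,
#                                      eval_examples=eval_examples,
#                                      post_process_function=post_processing_function,
#                                      quant_trainer_args=quant_trainer_args)
#   trainer.calibrate(calib_dataset=train_dataset)  # collect activation ranges
#   metrics = trainer.evaluate()                    # QA metrics via the post-processing hook
#   trainer.save_onnx(output_dir='./')              # export the calibrated model to ONNX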
| 701
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
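# A minimal instantiation sketch (hypothetical; it uses the class name exactly as
# it appears in this file and the pruning-specific defaults defined above):
#   cfg = lowercase(pruning_method='topK', mask_init='constant', mask_scale=0.0)
#   cfg_dict = cfg.to_dict()  # standard PretrainedConfig serialization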
| 648
| 0
|
from __future__ import annotations
from typing import Any
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case = 0):
self.row , self.column = row, column
self.array = [[default_value for c in range(column)] for r in range(row)]
def __str__( self):
s = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
max_element_length = 0
for row_vector in self.array:
    for obj in row_vector:
        max_element_length = max(max_element_length , len(str(obj)))
string_format_identifier = f'''%{max_element_length}s'''
# Make string and return
def single_line(row_vector) -> str:
    nonlocal string_format_identifier
    line = '['
    line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
    line += "]"
    return line
s += "\n".join(single_line(row_vector) for row_vector in self.array)
return s
def __repr__( self):
return str(self)
def A__ ( self , __snake_case):
if not (isinstance(UpperCAmelCase_ , (list, tuple)) and len(UpperCAmelCase_) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , __snake_case):
assert self.validate_indicies(UpperCAmelCase_)
return self.array[loc[0]][loc[1]]
def __setitem__( self , __snake_case , __snake_case):
assert self.validate_indicies(UpperCAmelCase_)
self.array[loc[0]][loc[1]] = value
def __add__( self , __snake_case):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert self.row == another.row and self.column == another.column
# Add
result = Matrix(self.row , self.column)
for r in range(self.row):
    for c in range(self.column):
        result[r, c] = self[r, c] + another[r, c]
return result
def __neg__( self):
result = Matrix(self.row , self.column)
for r in range(self.row):
    for c in range(self.column):
        result[r, c] = -self[r, c]
return result
def __sub__( self , __snake_case):
return self + (-another)
def __mul__( self , __snake_case):
if isinstance(UpperCAmelCase_ , (int, float)): # Scalar multiplication
result = Matrix(self.row , self.column)
for r in range(self.row):
    for c in range(self.column):
        result[r, c] = self[r, c] * another
return result
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): # Matrix multiplication
assert self.column == another.row
result = Matrix(self.row , another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
msg = f'''Unsupported type given for another ({type(another)})'''
raise TypeError(msg)
def A__ ( self):
result = Matrix(self.column , self.row)
for r in range(self.row):
    for c in range(self.column):
        result[c, r] = self[r, c]
return result
def A__ ( self , __snake_case , __snake_case):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) and isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
v_t = v.transpose()
numerator_factor = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
    return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
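# The update above is the Sherman-Morrison identity,
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# with this Matrix instance playing the role of A^(-1).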
# Testing
if __name__ == "__main__":
def testa( ) -> None:
    '''simple docstring'''
    ainv = Matrix(3 , 3 , 0 )
    for i in range(3 ):
        ainv[i, i] = 1
    print(F'''a^(-1) is {ainv}''' )
    # u, v
    u = Matrix(3 , 1 , 0 )
    u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
    v = Matrix(3 , 1 , 0 )
    v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
    print(F'''u is {u}''' )
    print(F'''v is {v}''' )
    print(F'''uv^T is {u * v.transpose()}''' )
    # Sherman Morrison
    print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 702
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
model = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
optimizer = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
ddpm_scheduler = DDPMScheduler(
    num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
ddim_scheduler = DDIMScheduler(
    num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
clean_images = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
noise = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
timesteps = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
model , optimizer = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
loss = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
model , optimizer = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
loss = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
| 648
| 0
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ) -> float:
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
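# e.g. euclidean([0, 0], [3, 4]) == 5.0 -- a quick sanity check for the helper above.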
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ) -> list[list[list[float] | float]]:
'''simple docstring'''
if dataset.ndim != value_array.ndim:
msg = (
    'Wrong input data\'s dimensions... '
    F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(msg )
try:
if dataset.shape[1] != value_array.shape[1]:
msg = (
    'Wrong input data\'s shape... '
    F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(msg )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
msg = (
    'Input data have different datatypes... '
    F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(msg )
answer = []
for value in value_array:
dist = euclidean(value , dataset[0] )
vector = dataset[0].tolist()
for dataset_value in dataset[1:]:
    temp_dist = euclidean(value , dataset_value )
    if dist > temp_dist:
        dist = temp_dist
        vector = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) -> float:
'''simple docstring'''
return np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) / (norm(UpperCAmelCase_ ) * norm(UpperCAmelCase_ ))
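# A small usage sketch for the nearest-neighbour search above (hypothetical data,
# assuming the functions keep their original names euclidean/similarity_search):
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.0, 1.0]])
#   similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 1.0]]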
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
checkpoint = torch.load(UpperCAmelCase_ , map_location='cpu' )
new_checkpoint = {}
new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
    new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']
new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
down_block_types = unet_config['down_block_types']
layers_per_block = unet_config['layers_per_block']
attention_head_dim = unet_config['attention_head_dim']
channels_list = unet_config['block_out_channels']
current_layer = 1
prev_channels = channels_list[0]
for i, layer_type in enumerate(down_block_types ):
    current_channels = channels_list[i]
    downsample_block_has_skip = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
prev_channels = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer = 0
up_block_types = unet_config['up_block_types']
for i, layer_type in enumerate(up_block_types ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
args = parser.parse_args()
args.class_cond = strabool(args.class_cond)
ckpt_name = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
    unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
    unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
    unet_config = TEST_UNET_CONFIG
else:
    raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
    unet_config["num_class_embeds"] = None
converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
image_unet = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
    scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
    scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
    scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
    raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 648
| 0
|
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
build_dir = Path(tmpdirname)
src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 704
|
def lowerCamelCase_ ( UpperCAmelCase_ : list ) -> list:
'''simple docstring'''
if len(UpperCAmelCase_ ) <= 1:
return [tuple(UpperCAmelCase_ )]
res = []
def generate(n: int, arr: list):
    c = [0] * n
    res.append(tuple(arr) )
    i = 0
    while i < n:
        if c[i] < i:
            if i % 2 == 0:
                arr[i], arr[0] = arr[0], arr[i]
            else:
                arr[i], arr[c[i]] = arr[c[i]], arr[i]
            res.append(tuple(arr) )
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
generate(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return res
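# e.g. heaps([1, 2, 3]) (the function above, as it is invoked in the __main__ block)
# yields the six permutations in Heap order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]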
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 648
| 0
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class lowercase ( lowercase__ ):
"""simple docstring"""
a__ = "efficientformer"
def __init__( self , __snake_case = [3, 2, 6, 4] , __snake_case = [48, 96, 2_24, 4_48] , __snake_case = [True, True, True, True] , __snake_case = 4_48 , __snake_case = 32 , __snake_case = 4 , __snake_case = 7 , __snake_case = 5 , __snake_case = 8 , __snake_case = 4 , __snake_case = 0.0 , __snake_case = 16 , __snake_case = 3 , __snake_case = 3 , __snake_case = 3 , __snake_case = 2 , __snake_case = 1 , __snake_case = 0.0 , __snake_case = 1 , __snake_case = True , __snake_case = True , __snake_case = 1e-5 , __snake_case = "gelu" , __snake_case = 0.0_2 , __snake_case = 1e-12 , __snake_case = 2_24 , __snake_case = 1e-05 , **__snake_case , ):
super().__init__(**__lowerCamelCase)
_UpperCamelCase : Union[str, Any] = hidden_act
_UpperCamelCase : List[Any] = hidden_dropout_prob
_UpperCamelCase : str = hidden_sizes
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[int] = layer_norm_eps
_UpperCamelCase : Union[str, Any] = patch_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : str = depths
_UpperCamelCase : Tuple = mlp_expansion_ratio
_UpperCamelCase : Dict = downsamples
_UpperCamelCase : Union[str, Any] = dim
_UpperCamelCase : List[Any] = key_dim
_UpperCamelCase : Optional[Any] = attention_ratio
_UpperCamelCase : int = resolution
_UpperCamelCase : int = pool_size
_UpperCamelCase : Optional[int] = downsample_patch_size
_UpperCamelCase : List[Any] = downsample_stride
_UpperCamelCase : Union[str, Any] = downsample_pad
_UpperCamelCase : List[str] = drop_path_rate
_UpperCamelCase : str = num_metaad_blocks
_UpperCamelCase : str = distillation
_UpperCamelCase : Dict = use_layer_scale
_UpperCamelCase : Optional[Any] = layer_scale_init_value
_UpperCamelCase : Optional[Any] = image_size
_UpperCamelCase : int = batch_norm_eps
| 705
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
    k = k.replace(parlai_name , hf_name )
if k.startswith('encoder' ):
    k = k.replace('.attn' , '.self_attn' )
    k = k.replace('norm1' , 'self_attn_layer_norm' )
    k = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
    k = k.replace('norm1' , 'self_attn_layer_norm' )
    k = k.replace('norm2' , 'encoder_attn_layer_norm' )
    k = k.replace('norm3' , 'final_layer_norm' )
return k
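# e.g. rename_state_dict_key('encoder.attention.q_lin.weight')
#      -> 'encoder.self_attn.q_proj.weight'  (via PATTERNS, then the encoder fix-ups)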
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
v = sd.pop(k )
new_k = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
sd[new_k] = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
model = torch.load(UpperCAmelCase_ , map_location='cpu' )
sd = model['model']
cfg = BlenderbotConfig.from_json_file(UpperCAmelCase_ )
m = BlenderbotForConditionalGeneration(cfg )
valid_keys = m.model.state_dict().keys()
failures = []
mapping = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
new_k = rename_state_dict_key(k )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
mapping[new_k] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase_ )
m.model.load_state_dict(mapping , strict=True )
m.half()
m.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
lowerCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 648
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase__ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
inspect_dataset(lowerCamelCase_ , lowerCamelCase_ )
script_name = path + '.py'
assert script_name in os.listdir(lowerCamelCase_ )
assert "__pycache__" not in os.listdir(lowerCamelCase_ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
inspect_metric(lowerCamelCase_ , lowerCamelCase_ )
script_name = path + '.py'
assert script_name in os.listdir(lowerCamelCase_ )
assert "__pycache__" not in os.listdir(lowerCamelCase_ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
info = get_dataset_config_info(path , config_name=config_name )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(lowerCamelCase_ ):
get_dataset_config_info(lowerCamelCase_ , config_name=lowerCamelCase_ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ) -> Tuple:
'''simple docstring'''
config_names = get_dataset_config_names(path )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
infos = get_dataset_infos(path )
assert list(infos.keys() ) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
infos = get_dataset_infos(path )
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
with pytest.raises(lowerCamelCase_ ):
get_dataset_split_names(lowerCamelCase_ , config_name=lowerCamelCase_ )
| 706
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowercase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
self.tokenizer = tokenizer
config = AutoConfig.from_pretrained(__snake_case)
self.bert = TFAutoModel.from_config(config)
def A__ ( self , __snake_case):
tokenized = self.tokenizer(__snake_case)
out = self.bert(**tokenized)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
super().setUp()
self.tokenizers = [
    BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
    TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False)
    for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
python_outputs = tokenizer(test_inputs , return_tensors='tf' , padding='longest')
tf_outputs = tf_tokenizer(test_inputs)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
merged_outputs = tf_tokenizer(self.paired_sentences)
separated_outputs = tf_tokenizer(
    text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
compiled_tokenizer = tf.function(tf_tokenizer)
for test_inputs in (self.test_sentences, self.paired_sentences):
    test_inputs = tf.constant(test_inputs)
    compiled_outputs = compiled_tokenizer(test_inputs)
    eager_outputs = tf_tokenizer(test_inputs)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
model = ModelToSave(tokenizer=tf_tokenizer)
test_inputs = tf.convert_to_tensor(self.test_sentences)
out = model(test_inputs) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
    save_path = Path(tempdir) / 'saved.model'
    model.save(save_path)
    loaded_model = tf.keras.models.load_model(save_path)
    loaded_output = loaded_model(test_inputs)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
| 648
| 0
|
from collections.abc import Callable
def lowerCamelCase_ ( UpperCAmelCase_ : Callable[[float], float] , UpperCAmelCase_ : float , UpperCAmelCase_ : float ) -> float:
'''simple docstring'''
start : float = a
end : float = b
if function(a ) == 0: # one of the a or b is a root for the function
    return a
elif function(b ) == 0:
    return b
elif (
    function(a ) * function(b ) > 0
): # if none of these are root and they are both positive or negative,
    # then this algorithm can't find the root
    raise ValueError('could not find root in given interval.' )
else:
    mid : float = start + (end - start) / 2.0
    while abs(start - mid ) > 1_0**-7: # until the interval is smaller than 10**-7
        if function(mid ) == 0:
            return mid
        elif function(mid ) * function(start ) < 0:
            end = mid
        else:
            start = mid
        mid = start + (end - start) / 2.0
return mid
def lowerCamelCase_ ( UpperCAmelCase_ : float ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
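# f(x) = x^3 - 2x - 5 has a single real root near x = 2.0945515, so bisection on
# [1, 1000] should converge to that value within the 10**-7 tolerance used above.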
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 707
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
| 0
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ = getLogger(__name__)
lowerCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : str = DEFAULT_DEVICE , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]="summarization" , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : Dict , ):
'''simple docstring'''
fout = Path(__A ).open('w' , encoding='utf-8' )
model_name = str(__A )
model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).to(__A )
if fpaa:
    model = model.half()
tokenizer = AutoTokenizer.from_pretrained(model_name )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
start_time = time.time()
# update config with task specific params
use_task_specific_params(__A , __A )
if prefix is None:
    prefix = prefix or getattr(model.config , 'prefix' , '' ) or ''''''
for examples_chunk in tqdm(list(chunks(__A , __A ) ) ):
    examples_chunk = [prefix + text for text in examples_chunk]
    batch = tokenizer(examples_chunk , return_tensors='pt' , truncation=True , padding='longest' ).to(__A )
    summaries = model.generate(
        input_ids=batch.input_ids , attention_mask=batch.attention_mask , **__A , )
    dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
    for hypothesis in dec:
        fout.write(hypothesis + '\n' )
        fout.flush()
fout.close()
runtime = int(time.time() - start_time ) # seconds
n_obs = len(__A )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowerCamelCase_ ( ):
'''simple docstring'''
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple=True ):
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument('model_name' , type=__A , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=__A , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=__A , help='where to save summaries' )
parser.add_argument('--reference_path' , type=__A , required=__A , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=__A , required=__A , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=__A , required=__A , default=__A , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
'--prefix' , type=__A , required=__A , default=__A , help='will be added to the begininng of src examples' )
parser.add_argument('--task' , type=__A , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=__A , default=8 , required=__A , help='batch size' )
parser.add_argument(
'--n_obs' , type=__A , default=-1 , required=__A , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=__A , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
args, rest = parser.parse_known_args()
parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
if parsed_args and verbose:
print(F'''parsed the following generate kwargs: {parsed_args}''' )
examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
    examples = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=__A )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
runtime_metrics = generate_summaries_or_translations(
    examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **parsed_args , )
if args.reference_path is None:
return {}
# Compute scores
score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
scores : dict = score_fn(output_lns , reference_lns )
scores.update(runtime_metrics )
if args.dump_args:
    scores.update(parsed_args )
if args.info:
    scores['info'] = args.info
if verbose:
    print(scores )
if args.score_path is not None:
    json.dump(scores , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
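    # Usage for summarization (a sketch; the file paths and flag values here are hypothetical):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization --bs 16 --fp16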
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_backbone( self , config , pixel_values , labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False)
    def test_config( self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self):
        return
@unittest.skip(reason='Bit does not output attentions')
    def test_attention_outputs( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
    def test_inputs_embeds( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
    def test_model_common_attributes( self):
pass
    def test_forward_signature( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    def test_hidden_states_output( self):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class)
@unittest.skip(reason='Bit does not use feedforward chunking')
    def test_feed_forward_chunking( self):
pass
    def test_for_image_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class BitModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@require_torch
class BitBackboneTest( BackboneTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self):
        self.model_tester = BitModelTester(self)
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 1_0_0_0_1) -> int:
    '''simple docstring'''
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f'{solution() = }')
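# A minimal sanity-check sketch (hypothetical, not part of the original solution):
# it lists the primes below 30 via is_prime() defined above.
if __name__ == "__main__":
    print([n for n in range(2, 3_0) if is_prime(n)])  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]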
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8_66_02_54])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray] , steps: int) -> list[numpy.ndarray]:
    '''simple docstring'''
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    '''simple docstring'''
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate(vector: numpy.ndarray , angle_in_degrees: float) -> numpy.ndarray:
    '''simple docstring'''
    theta = numpy.radians(angle_in_degrees )
    c, s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot(vectors: list[numpy.ndarray]) -> None:
    '''simple docstring'''
    axes = plt.gca()
    axes.set_aspect('equal' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
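# Worked example for rotate() above (a sketch): a 90-degree rotation of the unit vector
# (1, 0) yields (0, 1), since ((c, -s), (s, c)) is a counter-clockwise rotation matrix:
# >>> rotate(numpy.array([1, 0]), 90).round(6)
# array([0., 1.])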
def multiplication_table(number: int , number_of_terms: int) -> str:
    '''simple docstring'''
return "\n".join(
F'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
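    # Expected output sketch for the call above (one line per term of the table):
    # 5 * 1 = 5
    # 5 * 2 = 10
    # ...
    # 5 * 10 = 50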
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
    '''simple docstring'''
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus):
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker( OutputChecker ):
    """simple docstring"""
    def check_output( self , want , got , optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
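# Usage sketch for the custom doctest flag registered above (hypothetical docstring):
#     >>> print("nondeterministic output")  # doctest: +IGNORE_RESULT
# the output comparison is skipped, because check_output() returns True for flagged examples.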
ks = range(2, 2_0 + 1)
base = [1_0**k for k in range(ks[-1] + 1)]
memo: dict = {}
def next_term(a_i , k , i , n):
    '''simple docstring'''
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c, a_i[j] = divmod(new_c , 1_0 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute(a_i , k , i , n):
    '''simple docstring'''
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s , 1_0 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add(digits , k , addend):
    '''simple docstring'''
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 1_0:
            quotient, digits[j] = divmod(s , 1_0 )
            addend = addend // 1_0 + quotient
        else:
            digits[j] = s
            addend = addend // 1_0
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend , 1_0 )
        digits.append(digit )
def solution(n: int = 1_0**1_5 ) -> int:
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits , 2_0 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 1_0**j
    return a_n
if __name__ == "__main__":
print(f'{solution() = }')
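# Brute-force cross-check sketch (hypothetical helper, not part of the original solution).
# It applies the recurrence the digit-array machinery above implements -- start at 1 and
# repeatedly add the digit sum of the current term -- and is only practical for small n.
def _brute_force(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a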
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase__ = "Create a default config file for Accelerate with only a few flags set."
def lowerCamelCase_ ( UpperCAmelCase_ : int="no" , UpperCAmelCase_ : Dict = default_json_config_file , UpperCAmelCase_ : Optional[Any] = False ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : int = Path(__lowerCAmelCase )
path.parent.mkdir(parents=__lowerCAmelCase , exist_ok=__lowerCAmelCase )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
_UpperCamelCase : Dict = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
_UpperCamelCase : Union[str, Any] = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
_UpperCamelCase : str = torch.cuda.device_count()
_UpperCamelCase : str = num_gpus
_UpperCamelCase : str = False
if num_gpus > 1:
_UpperCamelCase : str = """MULTI_GPU"""
else:
_UpperCamelCase : int = """NO"""
elif is_xpu_available() and use_xpu:
_UpperCamelCase : Dict = torch.xpu.device_count()
_UpperCamelCase : Optional[int] = num_xpus
_UpperCamelCase : Optional[int] = False
if num_xpus > 1:
_UpperCamelCase : List[Any] = """MULTI_XPU"""
else:
_UpperCamelCase : Dict = """NO"""
elif is_npu_available():
_UpperCamelCase : Union[str, Any] = torch.npu.device_count()
_UpperCamelCase : Any = num_npus
_UpperCamelCase : Optional[int] = False
if num_npus > 1:
_UpperCamelCase : Union[str, Any] = """MULTI_NPU"""
else:
_UpperCamelCase : int = """NO"""
else:
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : int = True
_UpperCamelCase : Any = 1
_UpperCamelCase : Optional[Any] = """NO"""
_UpperCamelCase : Optional[int] = ClusterConfig(**__lowerCAmelCase )
config.to_json_file(__lowerCAmelCase )
return path
def default_command_parser(parser , parents):
    '''simple docstring'''
    parser = parser.add_parser('default' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=default_json_config_file , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command(args):
    '''simple docstring'''
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F'''accelerate configuration saved at {config_file}''' )
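# CLI sketch: the parser above backs the `accelerate config default` subcommand, e.g.
#   accelerate config default --mixed_precision fp16
# which writes a minimal config file without running the interactive questionnaire.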
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "vit_mae"
def __init__( self , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_24 , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=16 , __snake_case=5_12 , __snake_case=8 , __snake_case=20_48 , __snake_case=0.7_5 , __snake_case=False , **__snake_case , ):
super().__init__(**__snake_case)
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : int = image_size
_UpperCamelCase : Any = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = qkv_bias
_UpperCamelCase : str = decoder_num_attention_heads
_UpperCamelCase : Union[str, Any] = decoder_hidden_size
_UpperCamelCase : Union[str, Any] = decoder_num_hidden_layers
_UpperCamelCase : Any = decoder_intermediate_size
_UpperCamelCase : int = mask_ratio
_UpperCamelCase : List[Any] = norm_pix_loss
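# Minimal usage sketch (assumes the config class above; the defaults mirror facebook/vit-mae-base):
# config = ViTMAEConfig(mask_ratio=0.7_5, norm_pix_loss=True)
# config.save_pretrained("./vit-mae-config")  # writes config.json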
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    '''simple docstring'''
    def wrapper(*args , **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict , num_examples=1_0_0 , seq_shapes=None):
    '''simple docstring'''
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    data = np.random.randint(1_0 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset(dataset_path , features , num_examples=1_0_0 , seq_shapes=None):
    '''simple docstring'''
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
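# Usage sketch (hypothetical feature spec and output path): writes a tiny dummy Arrow dataset.
if __name__ == "__main__":
    features = datasets.Features(
        {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
    )
    dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10, seq_shapes={"vec": (8,)})
    print(dataset)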
import functools
def minimum_tickets_cost(days: list[int] , costs: list[int]) -> int:
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_6_6:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 3_6_5:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
    return dynamic_programming(1 )
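# Worked example (the classic ticket-pricing instance, as a sketch):
# minimum_tickets_cost(days=[1, 4, 6, 7, 8, 2_0], costs=[2, 7, 1_5]) == 11
# -> a 7-day pass bought on day 4 covers days 4-8 (cost 7), plus 1-day passes
#    for day 1 and day 20 (cost 2 each): 7 + 2 + 2 = 11.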
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin( TFGenerationMixin ):
    """simple docstring"""
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , FutureWarning , )
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        result = model(input_ids , token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device)
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
    def setUp( self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37)
    def test_config( self):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)
    def test_for_masked_lm( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)
    def test_for_question_answering( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
    def test_inference_no_head( self):
        directory = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['MYDIR'] , directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 10_24))
        self.assertEqual(output.shape , expected_shape)
        expected = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii , jj , a , b)
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE) , msg=msg)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase' , do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
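# Usage sketch (hypothetical; requires downloading the named checkpoint):
# tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# tok("hello world").input_ids  # token ids for [CLS] hello world [SEP]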
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class XLMRobertaTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
    def get_vocab( self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
    def _tokenize( self , text):
        return self.sp_model.encode(text , out_type=str)
    def _convert_token_to_id( self , token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string( self , tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,)
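# Offset sketch, following the alignment table above: a raw SentencePiece id is shifted by
# self.fairseq_offset (= 1), so spm id 3 ("," in the example row) becomes fairseq id 4, while
# "<s>", "<pad>", "</s>" and "<unk>" keep the hard-coded fairseq positions 0-3.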
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer( self):
return PegasusTokenizer.from_pretrained('google/pegasus-large')
    def get_tokenizer( self , **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer):
        return ("This is a test", "This is a test")
def A__ ( self):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def A__ ( self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<pad>')
        self.assertEqual(vocab_keys[1] , '</s>')
        self.assertEqual(vocab_keys[-1] , 'v')
        self.assertEqual(len(vocab_keys) , 11_03)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 11_03)
def A__ ( self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids , rust_ids)
def A__ ( self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result , ids)
def A__ ( self):
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result , ids)
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def A__ ( self):
        src_texts = ['This is going to be way too long.' * 1_50, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt')
        assert batch.input_ids.shape == (2, 10_24)
        assert batch.attention_mask.shape == (2, 10_24)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def A__ ( self):
_UpperCamelCase : Tuple = {'input_ids': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def A__ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(lowerCAmelCase__ , offset=0 , mask_token_sent=None , mask_token='[MASK]')
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer( self):
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')
    def get_tokenizer( self , **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer):
        return ("This is a test", "This is a test")
def A__ ( self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids , rust_ids)
@require_torch
def A__ ( self):
        src_texts = ['This is going to be way too long.' * 10_00, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt')
        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
def A__ ( self):
        raw_input_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 716
|
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
    def __init__( self , image_processor , feature_extractor):
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
@property
    def model_input_names( self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
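# Hedged usage sketch (untested; the sampling rate and waveform length are
# illustrative, not requirements): drive the processor above with audio only.
import numpy as np
from transformers import TvltFeatureExtractor, TvltImageProcessor

_demo_processor = lowercase(TvltImageProcessor() , TvltFeatureExtractor())
_demo_inputs = _demo_processor(audio=[np.random.randn(10_000)] , sampling_rate=44_100)
print(list(_demo_inputs.keys()))  # audio features only, since no images were passed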
| 648
| 0
|
def prime_sieve_eratosthenes( num : int ) -> list:
    '''
    Sieve of Eratosthenes: return every prime number up to and including num.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    '''
    if num <= 0:
        raise ValueError('Input must be a positive integer' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 717
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
    def __init__( self , vocab_size=5_02_77 , context_length=10_24 , hidden_size=40_96 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1e-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
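# Minimal usage sketch for the config above (deliberately tiny sizes): unset
# attention/intermediate sizes fall back to hidden_size and 4 * hidden_size.
from transformers import RwkvConfig, RwkvModel

_demo_config = RwkvConfig(vocab_size=10_00 , context_length=1_28 , hidden_size=64 , num_hidden_layers=2)
_demo_model = RwkvModel(_demo_config)
print(_demo_config.attention_hidden_size , _demo_config.intermediate_size)  # 64 256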
| 648
| 0
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _UpperCamelCase : Union[str, Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=response_mock) as mock_head:
_UpperCamelCase : List[str] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self):
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _UpperCamelCase : Optional[Any] = GPTaTokenizerFast.from_pretrained('gpt2')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=response_mock) as mock_head:
_UpperCamelCase : Optional[Any] = GPTaTokenizerFast.from_pretrained('gpt2')
# This check we did call the fake head request
mock_head.assert_called()
def A__ ( self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file , 'wb') as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , f)
            _UpperCamelCase : List[str] = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json'):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
            with open('tokenizer.json' , 'wb') as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , f)
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json')
def A__ ( self):
_UpperCamelCase : Optional[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass( cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass( cls):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer')
except HTTPError:
pass
def A__ ( self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , 'vocab.txt')
            with open(vocab_file , 'w' , encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token , repo_id='test-tokenizer')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir , repo_id='test-tokenizer' , push_to_hub=True , use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''')
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
def A__ ( self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , 'vocab.txt')
            with open(vocab_file , 'w' , encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir , repo_id='valid_org/test-tokenizer-org' , push_to_hub=True , use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
@require_tokenizers
def A__ ( self):
CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , 'vocab.txt')
            with open(vocab_file , 'w' , encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , 'vocab.txt')
            with open(vocab_file , 'w' , encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast')
        tokenizer = AutoTokenizer.from_pretrained(
            f'''{USER}/test-dynamic-tokenizer''' , use_fast=False , trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
        trie = Trie()
trie.add('Hello 友達')
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
trie.add('Hello')
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})
def A__ ( self):
        trie = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS] This is a extra_id_100'])
trie.add('[CLS]')
trie.add('extra_id_1')
trie.add('extra_id_100')
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS]', ' This is a ', 'extra_id_100'])
def A__ ( self):
        trie = Trie()
trie.add('A')
self.assertEqual(trie.split('ABC') , ['A', 'BC'])
self.assertEqual(trie.split('BCA') , ['BC', 'A'])
def A__ ( self):
        trie = Trie()
trie.add('TOKEN]')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def A__ ( self):
        trie = Trie()
trie.add('A')
trie.add('P')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def A__ ( self):
        trie = Trie()
trie.add('AB')
trie.add('B')
trie.add('C')
self.assertEqual(trie.split('ABC') , ['AB', 'C'])
def A__ ( self):
        trie = Trie()
trie.add('ABC')
trie.add('B')
trie.add('CD')
self.assertEqual(trie.split('ABCD') , ['ABC', 'D'])
def A__ ( self):
        trie = Trie()
        parts = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts , ['AB', 'C'])
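# A minimal sketch (NOT the transformers implementation) of the longest-match
# splitting behaviour the Trie tests above exercise: scan left to right, and at
# each start position keep the longest added word that matches from there.
class SimpleTrie:
    def __init__(self):
        self.data = {}

    def add(self , word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch , {})
        node[''] = 1  # terminal marker, mirroring trie.data in the tests

    def split(self , text):
        cuts , start = [0] , 0
        while start < len(text):
            node , end , match_end = self.data , start , None
            while end < len(text) and text[end] in node:
                node = node[text[end]]
                end += 1
                if '' in node:
                    match_end = end  # remember the longest match so far
            if match_end is None:
                start += 1
            else:
                cuts.extend([start , match_end])
                start = match_end
        return [text[a:b] for a, b in zip(cuts , cuts[1:] + [len(text)]) if a != b]

_demo_trie = SimpleTrie()
_demo_trie.add('AB')
_demo_trie.add('C')
print(_demo_trie.split('ABC'))  # ['AB', 'C']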
| 718
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
    def inputs( self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
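# Minimal usage sketch: the config above drives model construction, and unset
# fields keep the defaults from __init__ (sizes here are deliberately tiny).
from transformers import BertConfig, BertModel

_demo_config = BertConfig(hidden_size=1_28 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=5_12)
_demo_model = BertModel(_demo_config)
print(_demo_config.classifier_dropout)  # None -> heads fall back to hidden_dropout_prob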
| 648
| 0
|
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func : Callable , y0 : float , x0 : float , step_size : float , x_end : float ) -> np.ndarray:
    '''
    Heun's (modified Euler) method: predictor-corrector integration of
    dy/dx = ode_func(x, y) from x0 to x_end with initial value y0.
    '''
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
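# Quick check of the integrator above (the ODE and step size are arbitrary):
# dy/dx = y with y(0) = 1 should approach e ≈ 2.71828 at x = 1.
approx = euler_modified(lambda x, y: y , 1.0 , 0.0 , 0.01 , 1.0)
print(approx[-1])  # ~2.7183, converging as step_size shrinks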
| 719
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
    def setup( self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
    def encode( self , text , labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
    def decode( self , outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
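# The same entailment trick is exposed through the public pipeline API; a
# hedged sketch of the equivalent call (the tool above wraps the model directly):
from transformers import pipeline

_demo_classifier = pipeline('zero-shot-classification' , model='facebook/bart-large-mnli')
print(_demo_classifier('I love this movie' , candidate_labels=['positive', 'negative']))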
| 648
| 0
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def A__ ( self):
super().setUp()
        vocab = ['▁This', '▁is', '▁a', '▁t', 'est']
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'])
        with open(self.monolingual_vocab_file , 'w' , encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f'''{token} {vocab_tokens[token]}\n''')
        tokenizer = BartphoTokenizer(lowerCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer( self , **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer):
        input_text = 'This is a là test'
        output_text = 'This is a<unk><unk> test'
        return input_text, output_text
def A__ ( self):
        tokenizer = BartphoTokenizer(lowerCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map)
        text = 'This is a là test'
        bpe_tokens = '▁This ▁is ▁a ▁l à ▁t est'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
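# The lazy module above resolves these names on first import; a hedged
# captioning sketch using the public BLIP checkpoint (downloads weights):
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

_demo_processor = BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base')
_demo_model = BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-base')
_demo_inputs = _demo_processor(images=Image.new('RGB' , (2_24, 2_24)) , return_tensors='pt')
print(_demo_processor.decode(_demo_model.generate(**_demo_inputs)[0] , skip_special_tokens=True))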
| 648
| 0
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_DOCS = '''docs/source/en'''
REPO_PATH = '''.'''
def _find_text_in_file( filename , start_prompt , end_prompt ):
    '''Find the block of text between `start_prompt` and `end_prompt` in `filename`.'''
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCAmelCase__ = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
lowerCAmelCase__ = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
lowerCAmelCase__ = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
lowerCAmelCase__ = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split( identifier ):
    '''Split a CamelCased name into its component words.'''
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , identifier )
    return [m.group(0 ) for m in matches]
def _center_text( text , width ):
    '''Pad `text` with spaces so it sits centered in a cell of the given width.'''
    text_length = 2 if text == """✅""" or text == """❌""" else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules( ):
    '''Build the big model-support table in index.md from the auto modules.'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith('Tokenizer' ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('TokenizerFast' ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = """|""" + """|""".join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + """|\n"""
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
    check = {True: """✅""", False: """❌"""}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def check_model_table( overwrite=False ):
    '''Check that the model table in index.md matches the library, optionally rewriting it.'''
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
| 721
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 0
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple , flax_tensor ):
    '''Rename flax weight names and transpose tensors to the PyTorch layout.'''
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
    return flax_key_tuple, flax_tensor
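# Illustrative only: a fake 3-D "expert" kernel is renamed to `weight` and
# permuted from (experts, in, out) to (experts, out, in).
_demo_key, _demo_tensor = rename_base_flax_keys(('mlp', 'wi', 'kernel') , torch.zeros(8 , 5_12 , 10_24))
print(_demo_key , tuple(_demo_tensor.shape))  # ('mlp', 'wi', 'weight') (8, 1024, 512)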
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
    '''Map a flattened checkpoint entry to its real layer name and its content.'''
    if "metadata" in layer:
        split_layer = layer.split('metadata' )
        curr_real_layer_name = ''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('metadata' + split_layer[1]).split('/' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('kvstore' )
        curr_real_layer_name = ''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
    else:
        split_layer = layer.split('/' )
        curr_real_layer_name = '/'.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
    elif "kvstore/driver" in layer:
        content = 'file'
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block , save_path ):
    '''Rename the keys of a weight block and save it as a torch file.'''
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('/' , '.' )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name = WEIGHTS_NAME ):
    '''Shard a T5X Switch checkpoint into PyTorch weight files no larger than max_shard_size.'''
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )['optimizer']['target']
        checkpoint_info = flatten_dict(checkpoint_info , sep='/' )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split('/' ) ) , raw_weights )
        key = '/'.join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace('.bin' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('.bin' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            '.bin' , F'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , 'w' , encoding='utf-8' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
        f.write(content )
    return metadata, index
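# Back-of-envelope for the size bookkeeping above: a float32 tensor with one
# million elements adds 4 MB toward the (default 10GB) shard budget.
print(int(1_000_000 * dtype_byte_size(torch.float32)))  # 4000000 bytes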
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check( ):
    '''Quick round-trip generation check on a small converted Switch Transformers checkpoint.'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
    config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
    tokenizer = T5Tokenizer.from_pretrained('t5-small' )
    text = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
    input_ids = tokenizer(text , return_tensors='pt' ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 700
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def A__ ( self):
super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(lowerCAmelCase__)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def A__ ( self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<s>')
        self.assertEqual(vocab_keys[1] , '<pad>')
        self.assertEqual(vocab_keys[-1] , 'j')
        self.assertEqual(len(vocab_keys) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [2_89, 50, 14, 1_74, 3_86] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
        cls.tokenizer : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
        self.assertEqual(result , expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token , result)
    def A__ ( self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0] , FR_CODE)
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
    def A__ ( self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
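# Illustrative sketch (not part of the original test): multilingual speech
# translation forces generation to start with a target-language code token.
# The id mapping below is hypothetical, mirroring the assertions above.
def build_decoder_prefix(lang_code_to_id: dict, tgt_lang: str) -> list:
    # Generation is forced to begin with the target-language token id.
    return [lang_code_to_id[tgt_lang]]

assert build_decoder_prefix({'pt': 4, 'ru': 6, 'it': 9, 'de': 11}, 'de') == [11]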
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
@dataclass
class lowercase :
"""simple docstring"""
a__ = 42
a__ = field(default_factory=lowercase__ )
a__ = field(default_factory=lowercase__ )
def A__ ( self , __snake_case , __snake_case , __snake_case):
        _UpperCamelCase : Tuple = len(list(m.modules())) == 1 or isinstance(__snake_case , nn.Conv2d) or isinstance(__snake_case , nn.BatchNorm2d)
if has_not_submodules:
self.traced.append(__snake_case)
def __call__( self , __snake_case):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(__snake_case)
[x.remove() for x in self.handles]
return self
@property
def A__ ( self):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class lowercase :
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 1
a__ = field(default_factory=lowercase__ )
a__ = field(default_factory=lowercase__ )
a__ = True
def __call__( self , __snake_case):
_UpperCamelCase : str = Tracker(self.dest)(__snake_case).parametrized
_UpperCamelCase : Dict = Tracker(self.src)(__snake_case).parametrized
_UpperCamelCase : Tuple = list(filter(lambda __snake_case: type(__snake_case) not in self.src_skip , __snake_case))
_UpperCamelCase : str = list(filter(lambda __snake_case: type(__snake_case) not in self.dest_skip , __snake_case))
if len(__snake_case) != len(__snake_case) and self.raise_if_mismatch:
raise Exception(
f'''Numbers of operations are different. Source module has {len(__snake_case)} operations while'''
f''' destination module has {len(__snake_case)}.''')
for dest_m, src_m in zip(__snake_case , __snake_case):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
                print(f'''Transferred from={src_m} to={dest_m}''')
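# Self-contained sketch of the forward-hook tracing idea behind the Tracker
# class above (an assumed simplification, not the original implementation):
import torch
import torch.nn as nn

def trace_leaf_modules(model: nn.Module, sample: torch.Tensor) -> list:
    traced = []
    # register a hook on every leaf module; each forward pass records the module
    handles = [
        m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
        for m in model.modules()
        if len(list(m.children())) == 0
    ]
    model(sample)
    for handle in handles:
        handle.remove()
    return traced

# trace_leaf_modules(nn.Sequential(nn.Linear(4, 4), nn.ReLU()), torch.randn(1, 4))
# -> [Linear(in_features=4, out_features=4, bias=True), ReLU()]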
class lowercase ( nn.Module ):
"""simple docstring"""
    def __init__( self , model):
        super().__init__()
        feature_blocks : List[Any] = []
# - get the stem
feature_blocks.append(('conv1', model.stem))
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block'), f'''Unexpected layer name {k}'''
            block_index = len(feature_blocks) + 1
feature_blocks.append((f'''res{block_index}''', v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)
    def A__ ( self , x):
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class lowercase ( lowercase__ ):
"""simple docstring"""
    def A__ ( self , x):
        x_split = x.split('-')
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
def __getitem__( self , __snake_case):
# default to timm!
if x not in self:
_UpperCamelCase : Optional[int] = self.convert_name_to_timm(__snake_case)
_UpperCamelCase : Tuple = partial(lambda: (timm.create_model(__snake_case , pretrained=__snake_case).eval(), None))
else:
_UpperCamelCase : int = super().__getitem__(__snake_case)
return val
class lowercase ( lowercase__ ):
"""simple docstring"""
def __getitem__( self , __snake_case):
if "seer" in x and "in1k" not in x:
_UpperCamelCase : List[str] = RegNetModel
else:
_UpperCamelCase : Optional[int] = RegNetForImageClassification
return val
def manually_copy_vissl_head(from_state_dict , to_state_dict , keys ) -> dict:
'''simple docstring'''
for from_key, to_key in keys:
_UpperCamelCase : int = from_state_dict[from_key].clone()
print(F'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def convert_weight_and_push(name: str , from_model_func , our_model_func , config , save_directory , push_to_hub: bool = True , ) -> None:
'''simple docstring'''
print(F'''Converting {name}...''' )
with torch.no_grad():
_UpperCamelCase , _UpperCamelCase : Dict = from_model_func()
_UpperCamelCase : Tuple = our_model_func(__SCREAMING_SNAKE_CASE ).eval()
_UpperCamelCase : str = ModuleTransfer(src=__SCREAMING_SNAKE_CASE , dest=__SCREAMING_SNAKE_CASE , raise_if_mismatch=__SCREAMING_SNAKE_CASE )
_UpperCamelCase : Any = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(__SCREAMING_SNAKE_CASE )
if from_state_dict is not None:
_UpperCamelCase : Optional[int] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
_UpperCamelCase : Any = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
_UpperCamelCase : List[str] = manually_copy_vissl_head(__SCREAMING_SNAKE_CASE , our_model.state_dict() , __SCREAMING_SNAKE_CASE )
our_model.load_state_dict(__SCREAMING_SNAKE_CASE )
_UpperCamelCase : Tuple = our_model(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
_UpperCamelCase : Dict = (
our_outputs.logits if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else our_outputs.last_hidden_state
)
_UpperCamelCase : Union[str, Any] = from_model(__SCREAMING_SNAKE_CASE )
_UpperCamelCase : List[str] = from_output[-1] if type(__SCREAMING_SNAKE_CASE ) is list else from_output
    # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
_UpperCamelCase : Any = our_outputs.hidden_states[-1]
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
_UpperCamelCase : Any = 2_2_4 if 'seer' not in name else 3_8_4
# we can use the convnext one
_UpperCamelCase : int = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , )
print(F'''Pushed {name}''' )
def convert_weights_and_push(save_directory , model_name: str = None , push_to_hub: bool = True ) -> None:
'''simple docstring'''
_UpperCamelCase : Optional[int] = 'imagenet-1k-id2label.json'
_UpperCamelCase : Any = 1_0_0_0
_UpperCamelCase : Tuple = (1, num_labels)
_UpperCamelCase : Optional[Any] = 'huggingface/label-files'
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : Dict = json.load(open(cached_download(hf_hub_url(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) ) , 'r' ) )
    _UpperCamelCase : str = {int(k): v for k, v in idalabel.items()}
_UpperCamelCase : Optional[int] = idalabel
_UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
_UpperCamelCase : Any = partial(__SCREAMING_SNAKE_CASE , num_labels=__SCREAMING_SNAKE_CASE , idalabel=__SCREAMING_SNAKE_CASE , labelaid=__SCREAMING_SNAKE_CASE )
_UpperCamelCase : Optional[Any] = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
_UpperCamelCase : Dict = NameToOurModelFuncMap()
_UpperCamelCase : Any = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str , model_func ) -> Tuple[nn.Module, Dict]:
_UpperCamelCase : Optional[int] = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , model_dir=str(__SCREAMING_SNAKE_CASE ) , map_location='cpu' )
_UpperCamelCase : Tuple = model_func()
# check if we have a head, if yes add it
_UpperCamelCase : List[Any] = files['classy_state_dict']['base_model']['model']
_UpperCamelCase : Union[str, Any] = model_state_dict['trunk']
model.load_state_dict(__SCREAMING_SNAKE_CASE )
return model.eval(), model_state_dict["heads"]
# pretrained
_UpperCamelCase : Dict = partial(
        __SCREAMING_SNAKE_CASE , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
_UpperCamelCase : List[Any] = partial(
        __SCREAMING_SNAKE_CASE , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
_UpperCamelCase : Optional[int] = partial(
        __SCREAMING_SNAKE_CASE , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
_UpperCamelCase : List[str] = partial(
__SCREAMING_SNAKE_CASE , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
# IN1K finetuned
_UpperCamelCase : Optional[int] = partial(
        __SCREAMING_SNAKE_CASE , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
_UpperCamelCase : Dict = partial(
        __SCREAMING_SNAKE_CASE , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
_UpperCamelCase : int = partial(
        __SCREAMING_SNAKE_CASE , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
_UpperCamelCase : Dict = partial(
__SCREAMING_SNAKE_CASE , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
if model_name:
convert_weight_and_push(
__SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
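# Example invocation (hypothetical paths/values, assuming the dependencies
# above are installed):
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./regnet-dump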
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
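# Illustrative usage (assumed: the config class above, here named `lowercase`,
# mirrors BertConfig plus movement-pruning fields):
#   config = lowercase(vocab_size=30522, pruning_method="topK", mask_init="constant")
# pruning_method / mask_init / mask_scale control how masked (pruned) linear
# layers are scored and initialized; the remaining fields match standard BERT.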
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
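# Sketch of the lazy-import indirection _LazyModule provides (assumption:
# heavy submodules are only imported when an attribute is first accessed):
import importlib

class _TinyLazyModule:
    def __init__(self, name: str):
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:
            self._module = importlib.import_module(self._name)  # deferred import
        return getattr(self._module, attr)

# _TinyLazyModule("math").sqrt(4.0) triggers the real import on first use.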
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
        model = UNet2DModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
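# The equivalence test above relies on DDPM and DDIM sharing the same forward
# (noising) process; a standalone sketch under the standard assumption
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps:
import torch

def add_noise(x0: torch.Tensor, eps: torch.Tensor, alpha_bar_t: torch.Tensor) -> torch.Tensor:
    return alpha_bar_t.sqrt() * x0 + (1 - alpha_bar_t).sqrt() * eps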
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case = None):
if components is None:
_UpperCamelCase : Dict = []
_UpperCamelCase : Dict = list(__A)
def __len__( self):
return len(self.__components)
def __str__( self):
return "(" + ",".join(map(__A , self.__components)) + ")"
def __add__( self , __snake_case):
_UpperCamelCase : Any = len(self)
if size == len(__A):
_UpperCamelCase : Dict = [self.__components[i] + other.component(__A) for i in range(__A)]
return Vector(__A)
else:
raise Exception('must have the same size')
def __sub__( self , __snake_case):
_UpperCamelCase : Union[str, Any] = len(self)
if size == len(__A):
_UpperCamelCase : Optional[int] = [self.__components[i] - other.component(__A) for i in range(__A)]
return Vector(__A)
else: # error case
raise Exception('must have the same size')
@overload
def __mul__( self , __snake_case):
...
@overload
def __mul__( self , __snake_case):
...
def __mul__( self , __snake_case):
if isinstance(__A , (float, int)):
_UpperCamelCase : List[Any] = [c * other for c in self.__components]
return Vector(__A)
elif isinstance(__A , __A) and len(self) == len(__A):
_UpperCamelCase : int = len(self)
_UpperCamelCase : Tuple = [self.__components[i] * other.component(__A) for i in range(__A)]
return sum(__A)
else: # error case
raise Exception('invalid operand!')
def A__ ( self):
return Vector(self.__components)
def A__ ( self , __snake_case):
if isinstance(__A , __A) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception('index out of range')
def A__ ( self , __snake_case , __snake_case):
assert -len(self.__components) <= pos < len(self.__components)
_UpperCamelCase : Optional[Any] = value
def A__ ( self):
if len(self.__components) == 0:
raise Exception('Vector is empty')
_UpperCamelCase : List[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(__A))
def A__ ( self , __snake_case , __snake_case = False):
_UpperCamelCase : Union[str, Any] = self * other
_UpperCamelCase : Union[str, Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def zero_vector(dimension: int ) -> Vector:
    '''simple docstring'''
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector(dimension: int , pos: int ) -> Vector:
    '''simple docstring'''
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy(scalar: float , x: Vector , y: Vector ) -> Vector:
    '''simple docstring'''
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector(n: int , a: int , b: int ) -> Vector:
    '''simple docstring'''
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = matrix
_UpperCamelCase : Any = w
_UpperCamelCase : Any = h
def __str__( self):
_UpperCamelCase : int = ""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__( self , __snake_case):
if self.__width == other.width() and self.__height == other.height():
_UpperCamelCase : Dict = []
for i in range(self.__height):
_UpperCamelCase : Union[str, Any] = [
self.__matrix[i][j] + other.component(__A , __A)
for j in range(self.__width)
]
matrix.append(__A)
return Matrix(__A , self.__width , self.__height)
else:
raise Exception('matrix must have the same dimension!')
def __sub__( self , __snake_case):
if self.__width == other.width() and self.__height == other.height():
_UpperCamelCase : List[str] = []
for i in range(self.__height):
_UpperCamelCase : int = [
self.__matrix[i][j] - other.component(__A , __A)
for j in range(self.__width)
]
matrix.append(__A)
return Matrix(__A , self.__width , self.__height)
else:
raise Exception('matrices must have the same dimension!')
@overload
def __mul__( self , __snake_case):
...
@overload
def __mul__( self , __snake_case):
...
def __mul__( self , __snake_case):
if isinstance(__A , __A): # matrix-vector
if len(__A) == self.__width:
_UpperCamelCase : Tuple = zero_vector(self.__height)
for i in range(self.__height):
_UpperCamelCase : List[str] = [
self.__matrix[i][j] * other.component(__A)
for j in range(self.__width)
]
ans.change_component(__A , sum(__A))
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!')
elif isinstance(__A , (int, float)): # matrix-scalar
_UpperCamelCase : Tuple = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(__A , self.__width , self.__height)
return None
def A__ ( self):
return self.__height
def A__ ( self):
return self.__width
def A__ ( self , __snake_case , __snake_case):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds')
def A__ ( self , __snake_case , __snake_case , __snake_case):
if 0 <= x < self.__height and 0 <= y < self.__width:
_UpperCamelCase : Optional[int] = value
else:
raise Exception('change_component: indices out of bounds')
def A__ ( self , __snake_case , __snake_case):
if self.__height != self.__width:
raise Exception('Matrix is not square')
_UpperCamelCase : Tuple = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__A)):
_UpperCamelCase : Dict = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__A , self.__width - 1 , self.__height - 1).determinant()
def A__ ( self , __snake_case , __snake_case):
if self.__height != self.__width:
raise Exception('Matrix is not square')
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__A , __A)
else:
raise Exception('Indices out of bounds')
def A__ ( self):
if self.__height != self.__width:
raise Exception('Matrix is not square')
if self.__height < 1:
raise Exception('Matrix has no element')
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_UpperCamelCase : Dict = [
self.__matrix[0][y] * self.cofactor(0 , __A) for y in range(self.__width)
]
return sum(__A)
def square_zero_matrix(n: int ) -> Matrix:
    '''simple docstring'''
    ans : list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix(w: int , h: int , a: int , b: int ) -> Matrix:
    '''simple docstring'''
    random.seed(None )
    matrix : list[list[float]] = [
        [random.randint(a , b ) for _ in range(w )] for _ in range(h )
    ]
    return Matrix(matrix , w , h )
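# Quick usage sketch for the Vector/Matrix classes above (intended interface;
# note some method internals still carry placeholder variable names):
#   v = Vector([1.0, 2.0, 3.0]); w = Vector([4.0, 5.0, 6.0])
#   v * w                  -> 32.0 (dot product)
#   v.euclidean_length()   -> 3.7416... (sqrt(14))
#   Matrix([[1, 2], [3, 4]], 2, 2).determinant() -> -2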
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def strabool(v ) -> bool:
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
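# e.g. strabool("yes") -> True and strabool("0") -> False; anything else raises
# ArgumentTypeError, so the helper can be passed to argparse as `type=strabool`.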
def convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ) -> dict:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim=None ) -> dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
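# Note: convert_attention above splits the fused qkv projection into separate
# q/k/v tensors via chunk(3, dim=0) and drops the trailing 1x1-conv dimensions
# with squeeze(-1).squeeze(-1), matching diffusers' linear attention layout.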
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
    lowerCAmelCase__ = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
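# Example invocation (hypothetical checkpoint file), e.g. for a consistency
# distillation ("cd") checkpoint trained on ImageNet-64:
#   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt --dump_path ./cm-imagenet64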
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
def heaps(arr: list ) -> list:
    '''Generate all permutations of `arr` with Heap's algorithm.'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(n: int , arr: list ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item ) for item in user_input.split(""",""" )]
    print(heaps(arr ))
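# For example, heaps([1, 2, 3]) returns all 3! = 6 permutations in Heap's order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]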
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = "." ) -> Iterator[str]:
    '''simple docstring'''
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('./' )
def md_prefix(i: int ) -> str:
    '''simple docstring'''
    return F'''{i * "  "}*''' if i else "\n##"
def print_path(old_path: str , new_path: str ) -> str:
    '''simple docstring'''
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F'''{md_prefix(i )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def print_directory_md(top_dir: str = "." ) -> None:
    '''simple docstring'''
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = F'''{filepath}/{filename}'''.replace(' ' , '%20' )
        filename = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
        print(F'''{md_prefix(indent )} [{filename}]({url})''' )
if __name__ == "__main__":
    print_directory_md(""".""")
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key(k ) -> str:
    '''simple docstring'''
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith('encoder' ):
        k = k.replace('.attn' , '.self_attn' )
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'final_layer_norm' )
    elif k.startswith('decoder' ):
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'encoder_attn_layer_norm' )
        k = k.replace('norm3' , 'final_layer_norm' )
    return k
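# For instance, given the PATTERNS table above, a ParlAI key like
# "encoder.attention.q_lin.weight" maps to "encoder.self_attn.q_proj.weight".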
def rename_layernorm_keys(sd ) -> None:
    '''simple docstring'''
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('layernorm_embedding' , 'layer_norm' )
        assert new_k not in sd
        sd[new_k] = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : int = model['model']
_UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = m.model.state_dict().keys()
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCamelCase : int = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase_ )
m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
m.half()
m.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
lowerCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
a__ = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
a__ = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
a__ = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a__ = field(default=2 , metadata={"help": "Batch size for training."} )
a__ = field(default=2 , metadata={"help": "Batch size for evaluation."} )
a__ = field(default=0.1 , metadata={"help": "Value of weight decay."} )
a__ = field(
default=1_0_0_0_0 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    a__ = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
a__ = field(default="cosine" , metadata={"help": "Learning rate."} )
a__ = field(
default=7_5_0 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
a__ = field(
default=1_6 , metadata={"help": "Number of gradient accumulation steps."} )
a__ = field(
default=__a , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
a__ = field(default=5_0_0_0_0 , metadata={"help": "Maximum number of training steps."} )
a__ = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a__ = field(default=1_0_2_4 , metadata={"help": "Sequence lengths used for training."} )
a__ = field(default=1 , metadata={"help": "Training seed."} )
a__ = field(
default=1_0_2_4 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
a__ = field(
default=__a , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
a__ = field(default=__a , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a__ = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a__ = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
a__ = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a__ = field(default=1_0_2_4 , metadata={"help": "Length of sequences to be evaluated."} )
a__ = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a__ = field(default=__a , metadata={"help": "Number of workers used for code evaluation."} )
a__ = field(
default=__a , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
a__ = field(
default=__a , metadata={"help": "Sample from the language model's output distribution."} )
a__ = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
a__ = field(default=2_5_6 , metadata={"help": "Maximum number of newly generated tokens."} )
a__ = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
a__ = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
a__ = field(default=1_0 , metadata={"help": "Number of generations to run in parallel."} )
a__ = field(
default=2_0_0 , metadata={"help": "Number of completions to generate for each sample."} )
a__ = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
a__ = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
a__ = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
a__ = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default=__a , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
a__ = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
a__ = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
a__ = field(
default=1_0_0_0_0_0 , metadata={"help": "Number of files to save per JSON output file."} )
a__ = field(default="content" , metadata={"help": "Column containing text data to process."} )
a__ = field(
default=1_0_0_0 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
a__ = field(
default=1_0_0 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
a__ = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
a__ = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
a__ = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
a__ = field(
default=__a , metadata={"help": "If True, near-duplicate samples are removed."} )
a__ = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
a__ = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
a__ = field(default="content" , metadata={"help": "Column containing text data to process."} )
a__ = field(default=2_0_0_0_0_0 , metadata={"help": "Number of examples to train tokenizer on."} )
a__ = field(
        default=3_2_7_6_8 , metadata={"help": "Target vocabulary size of the new tokenizer."} )
a__ = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
a__ = field(default=__a , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
a__ = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
a__ = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
a__ = field(default=__a , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class lowercase :
"""simple docstring"""
a__ = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
a__ = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
a__ = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
    a__ = field(default=__a , metadata={"help": "Push the saved model to the hub."} )
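# A minimal usage sketch (an assumption, not part of this file): dataclasses
# declared with `field(..., metadata={"help": ...})` like the ones above are
# typically consumed by transformers.HfArgumentParser, which turns each field
# into a command-line flag:
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(lowercase)  # note: only the last `lowercase` class defined above is visible here
#   args = parser.parse_args_into_dataclasses()[0]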
| 706
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        """simple docstring"""
        def __init__( self , tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)
        def call( self , inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self):
super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1]))
    def test_output_equivalence( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs , return_tensors='tf' , padding='longest')
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64) == tf_outputs[key]))
@slow
    def test_different_pairing_styles( self):
for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64) == separated_outputs[key]))
@slow
    def test_graph_mode( self):
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model( self):
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
| 648
| 0
|
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env( key , default="no" ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return value
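# A minimal usage sketch (the environment variable names below are made up for
# illustration): get_int_from_env returns the first non-negative integer found
# among the given variables, falling back to the default otherwise.
#
#   os.environ["MY_LOCAL_RANK"] = "2"
#   get_int_from_env(["MY_LOCAL_RANK", "LOCAL_RANK"], 0)   # -> 2
#   parse_flag_from_env("MY_DEBUG_FLAG", default=False)    # -> False when unset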
| 707
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
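# _LazyModule defers the heavy torch-backed imports declared in
# _import_structure until an attribute of the module is first accessed.
# A minimal usage sketch (assuming torch is installed):
#
#   from transformers.models.canine import CanineModel  # actual import happens lazily here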
| 648
| 0
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path( model_type , use_small=False ):
    '''simple docstring'''
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
def _download( from_hf_hub , file_name ):
    '''simple docstring'''
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_hub , filename=file_name , local_dir=CACHE_DIR )
def _load_model( ckpt_path , device , use_small=False , model_type="text" ):
    '''simple docstring'''
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f'''{model_type}_small''' if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
        _download(model_info['repo_id'] , model_info['file_name'] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head' )
    model_args['hidden_size'] = model_args.pop('n_embd' )
    model_args['num_layers'] = model_args.pop('n_layer' )
    model_config = ConfigClass(**checkpoint['model_args'] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(extra_keys ) != 0:
        raise ValueError(f'''extra keys found: {extra_keys}''' )
    if len(missing_keys ) != 0:
        raise ValueError(f'''missing keys: {missing_keys}''' )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss''' )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def load_model( pytorch_dump_folder_path , use_small=False , model_type="text" ):
    '''simple docstring'''
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , 'cpu' , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 1_0
    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError('initial and new outputs are not equal' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    '''simple docstring'''
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , 'config.json' ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , 'config.json' ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , 'config.json' ) )
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
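# Example CLI invocation (a sketch; the script filename is illustrative):
#
#   python convert_suno_to_hf.py text ./bark-text-converted --is_small
#
# This downloads the matching suno/bark checkpoint if it is not cached, checks
# the converted model's logits against the original implementation, and saves
# the HF-format weights into the given folder.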
| 708
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_backbone( self , config , pixel_values , labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False)
    def test_config( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self):
return
@unittest.skip(reason='Bit does not output attentions')
    def test_attention_outputs( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
    def test_inputs_embeds( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
    def test_model_common_attributes( self):
pass
    def test_forward_signature( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    def test_hidden_states_output( self):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class)
@unittest.skip(reason='Bit does not use feedforward chunking')
    def test_feed_forward_chunking( self):
pass
    def test_for_image_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@require_torch
class lowercase ( BackboneTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self):
        self.model_tester = BitModelTester(self)
| 648
| 0
|
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
# TensorRT execution-provider settings read by onnxruntime via environment
# variables (INT8 mode on, native calibration table off, engine caching on).
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 1_2_8
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Start inference...""")
start_time = time.time()
max_iters = 2_0_0_0
predict = {}
for iter in range(max_iters):
    predict = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1_0_0_0 / max_iters))
| 709
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8_66_02_54])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate( initial_vectors: list[numpy.ndarray] , steps: int ) -> list[numpy.ndarray]:
    '''simple docstring'''
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step( vectors: list[numpy.ndarray] ) -> list[numpy.ndarray]:
    '''simple docstring'''
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
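# Each segment (start, end) is replaced by four segments: the outer thirds stay
# in place and the middle third becomes the two sides of an outward-pointing
# equilateral bump (hence the 60-degree rotation above). One step therefore
# turns n segments (n + 1 points) into 4n segments, e.g. the 4-point initial
# triangle above grows to 13 points after a single iteration_step().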
def rotate( vector: numpy.ndarray , angle_in_degrees: float ) -> numpy.ndarray:
    '''simple docstring'''
    theta = numpy.radians(angle_in_degrees )
    c, s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
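# Worked example for rotate(): the matrix ((c, -s), (s, c)) applied to the unit
# vector (1, 0) yields (c, s), so rotate(numpy.array([1, 0]), 90) returns
# approximately array([0., 1.]) (exact up to floating-point error in cos(90)).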
def plot( vectors: list[numpy.ndarray] ) -> None:
    '''simple docstring'''
    axes = plt.gca()
    axes.set_aspect('equal' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 648
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id( self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab( self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<s>')
        self.assertEqual(vocab_keys[1] , '<pad>')
        self.assertEqual(vocab_keys[-1] , '<mask>')
        self.assertEqual(len(vocab_keys) , 10_11_22)
    def test_vocab_size( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22)
@require_torch
    def test_prepare_batch( self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 30_18, 7_03_07, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens) , padding=True , truncation=True , return_tensors='pt')
        self.assertIsInstance(batch , BatchEncoding)
        self.assertEqual((2, 6) , batch.input_ids.shape)
        self.assertEqual((2, 6) , batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result)
    def test_rust_and_python_full_tokenizers( self):
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
@slow
    def test_tokenizer_integration( self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=sequences , )
| 710
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure( config ):
    '''simple docstring'''
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption( parser ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session , exitstatus ):
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
lowerCAmelCase__ = doctest.register_optionflag("""IGNORE_RESULT""")
lowerCAmelCase__ = doctest.OutputChecker
class lowercase ( _lowercase ):
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case , __snake_case):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __snake_case , __snake_case , __snake_case)
lowerCAmelCase__ = CustomOutputChecker
lowerCAmelCase__ = HfDoctestModule
lowerCAmelCase__ = HfDocTestParser
| 648
| 0
|
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 1_0
    def get_scheduler_config( self , **__snake_case):
        config = {
            'num_train_timesteps': 11_00,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
        }
        config.update(**__snake_case)
        return config
    def test_timesteps( self):
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas( self):
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)
    def test_schedules( self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type( self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise( self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample , t)
            model_output = model(sample , t)
            output = scheduler.step(model_output , t , sample , generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1e-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1e-3
    def test_full_loop_with_v_prediction( self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample , t)
            model_output = model(sample , t)
            output = scheduler.step(model_output , t , sample , generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0_0_0_2) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device( self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t)
            model_output = model(sample , t)
            output = scheduler.step(model_output , t , sample , generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_0.0_8_0_7) < 1e-2
        assert abs(result_mean.item() - 0.0_1_3_1) < 1e-3
    def test_full_loop_device_karras_sigmas( self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t)
            model_output = model(sample , t)
            output = scheduler.step(model_output , t , sample , generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1e-2
        assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1e-3
| 711
|
ks = range(2, 2_0 + 1)
base = [1_0**k for k in range(ks[-1] + 1)]
memo = {}
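# This solver computes the digit-sum sequence a(1) = 1,
# a(n) = a(n-1) + digitsum(a(n-1)) for very large n. Writing a term as
# b * 10^k + c, the increment of each step depends only on digitsum(b) and c,
# so `memo` caches reusable "jumps" (total difference, number of terms skipped)
# keyed by digitsum(b) and then by c, instead of stepping one term at a time.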
def next_term( a_i , k , i , n ):
    '''simple docstring'''
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c, a_i[j] = divmod(new_c , 1_0 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute( a_i , k , i , n ):
    '''simple docstring'''
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s , 1_0 )
            ds_c += a_i[j]
        if addend > 0:
            add(a_i , k , addend )
    return diff, i - start_i
def add( digits , k , addend ):
    '''simple docstring'''
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 1_0:
            quotient, s = divmod(s , 1_0 )
            addend = addend // 1_0 + quotient
        else:
            addend = addend // 1_0
        digits[j] = s
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend , 1_0 )
        digits.append(digit )
def solution( n: int = 1_0**1_5 ) -> int:
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits , 2_0 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 1_0**j
    return a_n
if __name__ == "__main__":
print(f'{solution() = }')
| 648
| 0
|
def longest_common_subsequence( x: str , y: str ):
    '''simple docstring'''
    assert x is not None
    assert y is not None
    m = len(x )
    n = len(y )
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741
    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
    seq = ''
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
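# Worked example: for x = "AGGTAB" and y = "GXTXAYB" the DP table gives a
# longest-common-subsequence length of 4, and the backtracking loop above
# reconstructs the subsequence "GTAB" (the expected values used in the demo
# block below).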
if __name__ == "__main__":
    a = 'AGGTAB'
    b = 'GXTXAYB'
    expected_ln = 4
    expected_subseq = 'GTAB'
    ln, subseq = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( PretrainedConfig ):
"""simple docstring"""
a__ = "vit_mae"
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=5_12 , decoder_num_hidden_layers=8 , decoder_intermediate_size=20_48 , mask_ratio=0.7_5 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
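# A minimal usage sketch (`lowercase` is this file's ViT-MAE config class):
#
#   config = lowercase(mask_ratio=0.5)
#   assert config.model_type == "vit_mae"
#   print(config.decoder_hidden_size)  # -> 512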
| 648
| 0
|
def solution( n: int = 1_0_0 ) -> int:
    '''simple docstring'''
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
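# Worked example (Project Euler 29): for n = 5, the 16 products a**b with
# 2 <= a, b <= 5 contain a single collision (2**4 == 4**2 == 16), so the set
# holds 15 distinct values and solution(5) returns 15.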
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 713
|
import functools
def mincost_tickets( days: list[int] , costs: list[int] ) -> int:
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_6_6:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 3_6_5:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
    return dynamic_programming(1 )
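# Worked example (LeetCode 983 "Minimum Cost For Tickets"): with
# days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15], one 1-day pass for day 1,
# one 7-day pass covering days 4-8, and one 1-day pass for day 20 is optimal,
# so mincost_tickets(days, costs) returns 2 + 7 + 2 = 11.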
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
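# The test class below drives the shared ModelTesterMixin / PipelineTesterMixin
# suites with the tiny configurations produced by MegatronBertModelTester above.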
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    """simple docstring"""
@slow
@unittest.skip('Model is not available.')
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 648
| 0
|
class RadixNode:
    """simple docstring"""

    def __init__(self, prefix="", is_leaf=False):
        # Mapping from the first character of the prefix of the node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word):
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words):
        for word in words:
            self.insert(word)

    def insert(self, word):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word):
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word):
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height=0):
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    """simple docstring"""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    """simple docstring"""
    assert test_trie()


def main() -> None:
    """simple docstring"""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
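# Illustrative output sketch: print_tree() renders one prefix per line, indented
# by depth, with "(leaf)" marking stored words, e.g. "ban" -> "ana" -> "s" for
# "banana"/"bananas" and "ban" -> "d" for the "band*" family.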
if __name__ == "__main__":
main()
| 715
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
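# Usage sketch (the vocab path below is hypothetical; any trained SentencePiece
# BPE model with the expected vocabulary works):
#   tokenizer = XLMRobertaTokenizer(vocab_file="sentencepiece.bpe.model")
#   pieces = tokenizer._tokenize("Hello world")
#   ids = [tokenizer._convert_token_to_id(piece) for piece in pieces]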
| 648
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 716
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
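# Usage sketch (inputs are illustrative placeholders):
#   processor = TvltProcessor(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
# The returned dict merges keys from both sub-processors, with audio features
# added first and image features layered on top.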
| 648
| 0
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    """simple docstring"""

    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
_UpperCamelCase : str = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase : Any = feat_extract.model_input_names[0]
_UpperCamelCase : Dict = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(__snake_case) == len(__snake_case) for x, y in zip(__snake_case , processed_features[input_name])))
_UpperCamelCase : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case)
_UpperCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='np')
_UpperCamelCase : List[Any] = processed_features[input_name]
if len(batch_features_input.shape) < 3:
_UpperCamelCase : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
    def test_batch_feature_pt(self):
_UpperCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case)
_UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase : Optional[int] = feat_extract.model_input_names[0]
_UpperCamelCase : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
_UpperCamelCase : List[Any] = processed_features[input_name]
if len(batch_features_input.shape) < 3:
_UpperCamelCase : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
    def test_batch_feature_tf(self):
_UpperCamelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case)
_UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase : str = feat_extract.model_input_names[0]
_UpperCamelCase : str = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
_UpperCamelCase : Tuple = processed_features[input_name]
if len(batch_features_input.shape) < 3:
_UpperCamelCase : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_UpperCamelCase : int = feat_extract.pad(__snake_case , padding=__snake_case)
_UpperCamelCase : List[str] = input_a[input_name]
_UpperCamelCase : Optional[int] = feat_extract.pad(__snake_case , padding='longest')
_UpperCamelCase : List[Any] = input_a[input_name]
_UpperCamelCase : List[Any] = feat_extract.pad(__snake_case , padding='max_length' , max_length=len(speech_inputs[-1]))
_UpperCamelCase : int = input_a[input_name]
_UpperCamelCase : str = feat_extract.pad(__snake_case , padding='longest' , return_tensors='np')
_UpperCamelCase : Optional[int] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__snake_case):
feat_extract.pad(__snake_case , padding='max_length')[input_name]
_UpperCamelCase : int = feat_extract.pad(
__snake_case , padding='max_length' , max_length=__snake_case , return_tensors='np')
_UpperCamelCase : Dict = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__snake_case))
self.assertTrue(_inputs_have_equal_length(__snake_case))
self.assertTrue(_inputs_have_equal_length(__snake_case))
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
_UpperCamelCase : Dict = feat_extract.pad(__snake_case , pad_to_multiple_of=10)
_UpperCamelCase : Dict = input_a[input_name]
_UpperCamelCase : Optional[Any] = feat_extract.pad(__snake_case , padding='longest' , pad_to_multiple_of=10)
_UpperCamelCase : Any = input_a[input_name]
_UpperCamelCase : List[Any] = feat_extract.pad(
__snake_case , padding='max_length' , pad_to_multiple_of=10 , max_length=__snake_case)
_UpperCamelCase : int = input_a[input_name]
_UpperCamelCase : int = feat_extract.pad(
__snake_case , padding='max_length' , pad_to_multiple_of=10 , max_length=__snake_case , return_tensors='np' , )
_UpperCamelCase : List[str] = input_a[input_name]
self.assertTrue(all(len(__snake_case) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case))
_UpperCamelCase : Dict = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(__snake_case) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
_UpperCamelCase : int = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3)
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
# truncate to smallest
_UpperCamelCase : str = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=__snake_case)
_UpperCamelCase : int = input_a[input_name]
_UpperCamelCase : Optional[int] = feat_extract.pad(__snake_case , padding='max_length' , max_length=len(speech_inputs[0]))
_UpperCamelCase : Union[str, Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__snake_case))
self.assertFalse(_inputs_have_equal_length(__snake_case))
# truncate to smallest with np
_UpperCamelCase : List[Any] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=__snake_case , )
_UpperCamelCase : Optional[int] = input_a[input_name]
_UpperCamelCase : Union[str, Any] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
_UpperCamelCase : str = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__snake_case))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__snake_case))
# truncate to middle
_UpperCamelCase : Union[str, Any] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=__snake_case , return_tensors='np' , )
_UpperCamelCase : Tuple = input_a[input_name]
_UpperCamelCase : Union[str, Any] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=__snake_case)
_UpperCamelCase : Dict = input_a[input_name]
_UpperCamelCase : Optional[Any] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
_UpperCamelCase : Tuple = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(__snake_case))
self.assertTrue(_inputs_have_equal_length(__snake_case))
self.assertTrue(_inputs_are_equal(__snake_case , __snake_case))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__snake_case))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__snake_case):
feat_extract.pad(__snake_case , truncation=__snake_case)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__snake_case):
feat_extract.pad(__snake_case , padding='longest' , truncation=__snake_case)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__snake_case):
feat_extract.pad(__snake_case , padding='longest' , truncation=__snake_case)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__snake_case):
feat_extract.pad(__snake_case , padding='max_length' , truncation=__snake_case)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_UpperCamelCase : Any = 12
_UpperCamelCase : Optional[int] = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
_UpperCamelCase : Optional[int] = input_a[input_name]
_UpperCamelCase : Any = feat_extract.pad(
__snake_case , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=__snake_case , )
_UpperCamelCase : int = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_UpperCamelCase : Union[str, Any] = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
_UpperCamelCase : Dict = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(__snake_case))
self.assertFalse(_inputs_have_equal_length(__snake_case))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
_UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase : List[Any] = feat_extract.model_input_names[0]
_UpperCamelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs})
_UpperCamelCase : str = feat_extract.pad(__snake_case , padding='longest' , return_tensors='np')[input_name]
_UpperCamelCase : str = feat_extract.pad(__snake_case , padding='longest' , return_tensors='pt')[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
    def test_padding_accepts_tensors_tf(self):
_UpperCamelCase : str = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase : List[Any] = feat_extract.model_input_names[0]
_UpperCamelCase : str = BatchFeature({input_name: speech_inputs})
_UpperCamelCase : List[str] = feat_extract.pad(__snake_case , padding='longest' , return_tensors='np')[input_name]
_UpperCamelCase : Any = feat_extract.pad(__snake_case , padding='longest' , return_tensors='tf')[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
_UpperCamelCase : int = self.feat_extract_dict
_UpperCamelCase : str = True
_UpperCamelCase : Optional[Any] = self.feature_extraction_class(**__snake_case)
_UpperCamelCase : Any = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase : int = [len(__snake_case) for x in speech_inputs]
_UpperCamelCase : Optional[int] = feat_extract.model_input_names[0]
_UpperCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs})
_UpperCamelCase : Any = feat_extract.pad(__snake_case , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , __snake_case)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , __snake_case)
    def test_attention_mask_with_truncation(self):
_UpperCamelCase : List[str] = self.feat_extract_dict
_UpperCamelCase : Any = True
_UpperCamelCase : Dict = self.feature_extraction_class(**__snake_case)
_UpperCamelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase : Union[str, Any] = [len(__snake_case) for x in speech_inputs]
_UpperCamelCase : List[Any] = feat_extract.model_input_names[0]
_UpperCamelCase : List[Any] = BatchFeature({input_name: speech_inputs})
_UpperCamelCase : Tuple = min(__snake_case)
_UpperCamelCase : int = feat_extract.pad(
__snake_case , padding='max_length' , max_length=__snake_case , truncation=__snake_case , return_tensors='np')
self.assertIn('attention_mask' , __snake_case)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 717
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}
    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
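# Usage sketch: the defaults mirror the published RWKV-4 checkpoints, so a
# smaller configuration only needs its dimensions overridden, e.g.
#   config = RwkvConfig(hidden_size=768, num_hidden_layers=12)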
| 648
| 0
|
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the 1-based position of the highest set bit of a number
    (0 for an input of 0).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(0)
    0
    >>> get_highest_set_bit_position(0.8)
    Traceback (most recent call last):
        ...
    TypeError: Input value must be an 'int' type
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
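# Usage sketch: the ONNX config only describes input axes, so pairing it with a
# config instance is enough to inspect them.
#   onnx_config = BertOnnxConfig(BertConfig())
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'token_type_ids']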
| 648
| 0
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    '''Computes the output size for `resize`, optionally keeping the aspect ratio and snapping each side to a multiple.'''

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
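# --- Illustrative usage sketch (added; not part of the original module) ---
# A 480x640 image resized toward (384, 384) with keep_aspect_ratio=True and
# multiple=32 fits the height (scale 0.8) and snaps both sides to multiples of
# 32, so the result is (384, 512):
#
#     dummy = np.zeros((3, 480, 640), dtype=np.uint8)  # CHW image
#     get_resize_output_image_size(dummy, 384, keep_aspect_ratio=True, multiple=32)
#     # -> (384, 512)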
class lowercase ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , keep_aspect_ratio = False , ensure_multiple_of = 1 , do_rescale = True , rescale_factor = 1 / 2_55 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 3_84, 'width': 3_84}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , keep_aspect_ratio = False , ensure_multiple_of = 1 , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}''')
        output_size = get_resize_output_image_size(
            image , output_size=(size['height'], size['width']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs)
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
    def preprocess( self , images , do_resize = None , size = None , keep_aspect_ratio = None , ensure_multiple_of = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
    def post_process_semantic_segmentation( self , outputs , target_sizes = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
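# --- Illustrative sketch (added; not part of the original module) ---
# The post-processing above upsamples each logit map to its target size and
# takes the per-pixel argmax. The shapes below are made up for demonstration.
def _semantic_segmentation_sketch():
    import torch

    logits = torch.randn(2, 5, 24, 24)        # (batch, num_labels, h, w)
    target_sizes = [(96, 96), (64, 80)]
    maps = []
    for idx, size in enumerate(target_sizes):
        resized = torch.nn.functional.interpolate(
            logits[idx].unsqueeze(dim=0), size=size, mode="bilinear", align_corners=False
        )
        maps.append(resized[0].argmax(dim=0))  # (h, w) map of label ids
    return maps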
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( PipelineTool ):
"""simple docstring"""
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
    def encode( self , text , labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
    def decode( self , outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
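# --- Illustrative sketch (added; not part of the original module) ---
# The tool above scores one "This example is {label}" hypothesis per label with
# an NLI model and keeps the entailment logit (index 2 for this checkpoint).
# A standalone equivalent written directly against transformers:
def _zero_shot_classification_sketch(text: str, labels: list) -> str:
    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
    model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")
    inputs = tokenizer(
        [text] * len(labels),
        [f"This example is {label}" for label in labels],
        return_tensors="pt",
        padding=True,
    )
    with torch.no_grad():
        logits = model(**inputs).logits
    # pick the label whose hypothesis gets the highest entailment score
    return labels[torch.argmax(logits[:, 2]).item()]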
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs) -> None:
    """Prints from multiple processes without interleaving, using an exclusive file lock."""
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)

local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    printflock(f'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(f'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(f'{gpu} is broken')
raise
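# --- Usage note (added) ---
# This diagnostic is meant to be launched once per GPU by a distributed
# launcher, which sets LOCAL_RANK for each process. The script filename below
# is an assumption; any name works:
#
#     python -m torch.distributed.run --nproc_per_node 2 torch-distributed-gpu-test.py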
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
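# --- Illustrative sketch (added; not part of the original module) ---
# The lazy-module pattern above swaps the package module for a proxy that only
# imports a submodule when one of its names is first accessed. A minimal
# standalone version of the same idea:
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # resolve the submodule that exports `attr`, import it on demand
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")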
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowercase ( PretrainedConfig ):
"""simple docstring"""
    model_type = "swinv2"
    attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
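# --- Illustrative check (added; not part of the original module) ---
# With the defaults above (embed_dim=96, depths of length 4), the derived
# hidden size after the last stage is 96 * 2 ** (4 - 1) == 768:
#
#     config = lowercase()          # the config class defined above
#     assert config.hidden_size == 768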
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase ( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@property
def A__ ( self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A__ ( self):
_UpperCamelCase : Optional[int] = ort.SessionOptions()
_UpperCamelCase : Union[str, Any] = False
return options
def A__ ( self):
_UpperCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png')
_UpperCamelCase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
_UpperCamelCase : Tuple = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A__)
_UpperCamelCase : List[Any] = 'A red cat sitting on a park bench'
_UpperCamelCase : Tuple = np.random.RandomState(0)
_UpperCamelCase : Optional[Any] = pipe(
prompt=A__ , image=A__ , mask_image=A__ , guidance_scale=7.5 , num_inference_steps=10 , generator=A__ , output_type='np' , )
_UpperCamelCase : Tuple = output.images
_UpperCamelCase : str = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase : Optional[Any] = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def A__ ( self):
_UpperCamelCase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png')
_UpperCamelCase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
_UpperCamelCase : List[str] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx')
_UpperCamelCase : List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=A__ , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A__)
_UpperCamelCase : Dict = 'A red cat sitting on a park bench'
_UpperCamelCase : List[Any] = np.random.RandomState(0)
_UpperCamelCase : Any = pipe(
prompt=A__ , image=A__ , mask_image=A__ , guidance_scale=7.5 , num_inference_steps=20 , generator=A__ , output_type='np' , )
_UpperCamelCase : Optional[int] = output.images
_UpperCamelCase : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase : Optional[int] = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def A__ ( self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def A__ ( self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<s>')
        self.assertEqual(vocab_keys[1] , '<pad>')
        self.assertEqual(vocab_keys[-1] , 'j')
        self.assertEqual(len(vocab_keys) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
    def A__ ( self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [2_89, 50, 14, 1_74, 3_86] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
    def A__ ( self):
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
        self.assertEqual(result , expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token , result)
    def A__ ( self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0] , FR_CODE)
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
    def A__ ( self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''Creates train/eval dataloaders for GLUE MRPC with a bert-base-cased tokenizer.'''
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=1_0_0,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config, args)
if __name__ == "__main__":
main()
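# --- Illustrative sketch (added; not part of the original script) ---
# How find_executable_batch_size behaves: it calls the wrapped function with
# the starting batch size, then halves it and retries whenever an
# out-of-memory error is raised. A toy analogue that fakes the OOM condition:
def _find_executable_batch_size_sketch():
    from accelerate.utils import find_executable_batch_size

    @find_executable_batch_size(starting_batch_size=128)
    def probe(batch_size):
        if batch_size > 32:  # pretend anything above 32 exhausts memory
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    return probe()  # retries at 64, then succeeds and returns 32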
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( PretrainedConfig ):
"""simple docstring"""
    model_type = "masked_bert"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
from __future__ import annotations
def lowerCamelCase_ ( UpperCAmelCase_ : list ) -> bool:
    '''
    Checks whether every element of the input occurs exactly once.

    >>> lowerCamelCase_([1, 2, 3])
    True
    >>> lowerCamelCase_([1, 2, 2])
    False
    '''
    return len(set(UpperCAmelCase_)) == len(UpperCAmelCase_)
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> dict:
    '''
    Returns the mapping from utf-8 bytes to printable unicode strings used by the byte-level BPE,
    avoiding whitespace and control characters that the merges would choke on.
    '''
    bs = (
        list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
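# --- Illustrative check (added; not part of the original module) ---
# bytes_to_unicode gives a reversible byte <-> unicode-character mapping that
# covers all 256 byte values:
#
#     table = bytes_to_unicode()
#     assert len(table) == 256 and len(set(table.values())) == 256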
def get_pairs(word: tuple) -> set:
    '''Returns the set of adjacent symbol pairs in a word, given as a tuple of symbols.'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
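# --- Illustrative check (added) ---
# BPE merges operate on adjacent symbol pairs; for the word ("l", "o", "w"):
#
#     get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}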
class lowercase ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case , __snake_case="replace" , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case=False , **__snake_case , ):
_UpperCamelCase : Tuple = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else bos_token
_UpperCamelCase : Dict = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else eos_token
_UpperCamelCase : Union[str, Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else sep_token
_UpperCamelCase : Optional[int] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else cls_token
_UpperCamelCase : Tuple = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else unk_token
_UpperCamelCase : Any = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : str = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else mask_token
super().__init__(
errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , )
with open(lowercase__ , encoding='utf-8') as vocab_handle:
_UpperCamelCase : List[str] = json.load(lowercase__)
_UpperCamelCase : Any = {v: k for k, v in self.encoder.items()}
_UpperCamelCase : Dict = errors # how to handle errors in decoding
_UpperCamelCase : Optional[int] = bytes_to_unicode()
_UpperCamelCase : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(lowercase__ , encoding='utf-8') as merges_handle:
_UpperCamelCase : Tuple = merges_handle.read().split('\n')[1:-1]
_UpperCamelCase : Tuple = [tuple(merge.split()) for merge in bpe_merges]
_UpperCamelCase : int = dict(zip(lowercase__ , range(len(lowercase__))))
_UpperCamelCase : List[Any] = {}
_UpperCamelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCamelCase : Optional[int] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self):
return len(self.encoder)
def A__ ( self):
return dict(self.encoder , **self.added_tokens_encoder)
def A__ ( self , __snake_case):
if token in self.cache:
return self.cache[token]
_UpperCamelCase : Optional[int] = tuple(lowercase__)
_UpperCamelCase : List[str] = get_pairs(lowercase__)
if not pairs:
return token
while True:
_UpperCamelCase : Dict = min(lowercase__ , key=lambda __snake_case: self.bpe_ranks.get(lowercase__ , float('inf')))
if bigram not in self.bpe_ranks:
break
_UpperCamelCase : Any = bigram
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : Tuple = 0
while i < len(lowercase__):
try:
_UpperCamelCase : Union[str, Any] = word.index(lowercase__ , lowercase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_UpperCamelCase : Union[str, Any] = j
if word[i] == first and i < len(lowercase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_UpperCamelCase : str = tuple(lowercase__)
_UpperCamelCase : Any = new_word
if len(lowercase__) == 1:
break
else:
_UpperCamelCase : str = get_pairs(lowercase__)
_UpperCamelCase : Union[str, Any] = ''' '''.join(lowercase__)
_UpperCamelCase : Optional[int] = word
return word
def A__ ( self , __snake_case):
_UpperCamelCase : List[str] = []
for token in re.findall(self.pat , lowercase__):
_UpperCamelCase : Optional[int] = ''''''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase__).split(' '))
return bpe_tokens
def A__ ( self , __snake_case):
return self.encoder.get(lowercase__ , self.encoder.get(self.unk_token))
def A__ ( self , __snake_case):
return self.decoder.get(lowercase__)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = ''''''.join(lowercase__)
_UpperCamelCase : List[Any] = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(lowercase__):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : Tuple = os.path.join(
lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
_UpperCamelCase : List[str] = os.path.join(
lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowercase__ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase__ , ensure_ascii=lowercase__) + '\n')
_UpperCamelCase : Dict = 0
with open(lowercase__ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __snake_case: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
_UpperCamelCase : Optional[Any] = token_index
writer.write(' '.join(lowercase__) + '\n')
index += 1
return vocab_file, merge_file
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : str = [self.cls_token_id]
_UpperCamelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__)
if token_ids_a is None:
return [1] + ([0] * len(lowercase__)) + [1]
return [1] + ([0] * len(lowercase__)) + [1, 1] + ([0] * len(lowercase__)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def A__ ( self , __snake_case , __snake_case=False , **__snake_case):
_UpperCamelCase : Union[str, Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowercase__) > 0 and not text[0].isspace()):
_UpperCamelCase : Dict = ''' ''' + text
return (text, kwargs)
def A__ ( self , __snake_case , __snake_case = None , __snake_case = PaddingStrategy.DO_NOT_PAD , __snake_case = None , __snake_case = None , ):
_UpperCamelCase : int = super()._pad(
encoded_inputs=lowercase__ , max_length=lowercase__ , padding_strategy=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , )
# Load from model defaults
if return_attention_mask is None:
_UpperCamelCase : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCamelCase : Optional[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_UpperCamelCase : Dict = len(encoded_inputs['global_attention_mask']) != len(lowercase__)
if needs_to_be_padded:
_UpperCamelCase : Any = len(lowercase__) - len(encoded_inputs['global_attention_mask'])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCamelCase : Any = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCamelCase : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return encoded_inputs
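# --- Illustrative note (added; not part of the original module) ---
# Why `global_attention_mask` is padded with -1 above: 0 already means "local
# attention", so padding positions need a distinct sentinel. Right-padding a
# mask of length 3 to length 5:
#
#     [1, 0, 0]  ->  [1, 0, 0, -1, -1]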
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def str2bool(v) -> bool:
    '''Parses common truthy/falsy strings into booleans (for argparse flags).'''
    if isinstance(v , bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
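# --- Usage note (added) ---
# str2bool is meant to be used as an argparse type; the flag name below is an
# assumption for illustration:
#
#     parser.add_argument("--class_cond", type=str2bool, default=True)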
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    '''Copies one residual block; the target key names follow diffusers' ResnetBlock2D parameters.'''
    new_checkpoint[F'''{new_prefix}.norm1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[F'''{new_prefix}.conv1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[F'''{new_prefix}.norm2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[F'''{new_prefix}.conv2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[F'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[F'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    '''Splits a fused qkv attention block; the target key names follow diffusers' Attention parameters.'''
    weight_q, weight_k, weight_v = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    bias_q, bias_k, bias_v = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
    new_checkpoint[F'''{new_prefix}.group_norm.weight'''] = checkpoint[F'''{old_prefix}.norm.weight''']
    new_checkpoint[F'''{new_prefix}.group_norm.bias'''] = checkpoint[F'''{old_prefix}.norm.bias''']
    new_checkpoint[F'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'''{new_prefix}.to_out.0.bias'''] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
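# --- Illustrative sketch (added; not part of the original script) ---
# The fused qkv projection above has shape (3*C, C, 1, 1); chunking on dim 0
# and squeezing the trailing 1x1 dims yields three (C, C) linear weights:
def _qkv_chunk_sketch(channels: int = 8) -> None:
    qkv = torch.randn(3 * channels, channels, 1, 1)
    weight_q, weight_k, weight_v = qkv.chunk(3, dim=0)
    assert weight_q.squeeze(-1).squeeze(-1).shape == (channels, channels)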
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f'down_blocks.{i}.attentions.{j}'
                old_prefix = f'input_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f'down_blocks.{i}.downsamplers.0'
            old_prefix = f'input_blocks.{current_layer}.0'
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config['up_block_types']

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.1'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f'up_blocks.{i}.attentions.{j}'
                old_prefix = f'output_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.2'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f'Checkpoint: {ckpt_name}')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
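    # Example invocation (illustrative; the script filename and checkpoint name are
    # assumptions, but the name must contain one of the tags matched above, e.g.
    # "imagenet64", "256" plus "bedroom"/"cat", "test", and "cd"/"ct"):
    #   python convert_consistency_to_diffusers.py \
    #       --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64_l2 --class_cond True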
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a negative integer."""
    if number > 0:
        raise ValueError('input must be a negative integer')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
def heaps(arr: list) -> list:
    """Iterative Heap's algorithm: return all permutations of a list as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
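# Worked example (illustrative): heaps([1, 2, 3]) yields all 3! = 6 permutations, in
# the order (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1); each
# step swaps exactly one pair of elements.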
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
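# Usage note (illustrative, not part of the original module): with the default columns,
# Summarization().column_mapping == {"text": "text", "summary": "summary"}.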
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
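# Example (illustrative): a ParlAI key such as "encoder.layers.0.attention.q_lin.weight"
# first becomes "encoder.layers.0.attn.q_proj.weight" via PATTERNS, then the
# encoder-specific pass rewrites ".attn" to ".self_attn", giving
# "encoder.layers.0.self_attn.q_proj.weight".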
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/tweak a ParlAI checkpoint's weights into the Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve defined as a weighted sum of a set of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree))
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
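# Worked example (illustrative): for the degree-1 curve through (1, 2) and (3, 5), the
# Bernstein basis at t = 0.5 is [0.5, 0.5], so bezier_curve_function(0.5) returns the
# midpoint (2.0, 3.5).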
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors='tf', padding='longest')
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences])
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        """Find an `open(...)` call that is missing an explicit encoding (binary modes are excluded)."""
        with open(file_path, encoding='utf-8') as input_file:
            regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        """Find a `print(...)` call, ignoring occurrences inside comments and docstrings."""
        with open(file_path, encoding='utf-8') as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
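# Example (illustrative): `open(path)` with no keyword matches the encoding regex, while
# `open(path, encoding="utf-8")` and binary modes such as `open(path, "rb")` are excluded
# by the negative lookahead; a `# print(...)` inside a comment is ignored by the print regex.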
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}')

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
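# Example (illustrative): with ignore_keys = ["decoder.*"], any name starting with
# "decoder." is skipped; with ignore_keys = ["encoder.*.lstm"], a name containing the
# "encoder" prefix and the "lstm" suffix is skipped.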
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}')

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.')
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed') and name.endswith('embed_avg'):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split('.')[-2]
                    mapped_key = mapped_key.replace('*', layer_index)
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}')

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = BitModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = self.num_labels
_UpperCamelCase : Dict = BitForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
_UpperCamelCase : Any = None
_UpperCamelCase : str = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def A__ ( self):
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Dict = BitModelTester(self)
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
@unittest.skip(reason='Bit does not output attentions')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
_UpperCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(config=__snake_case)
for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Any = layer_type
_UpperCamelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
@unittest.skip(reason='Bit does not use feedforward chunking')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case)
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
"""simple docstring"""
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = BitModelTester(self)
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
a__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch snowflake construction `steps` times to the initial vectors."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace the middle third of each segment by the two sides of an equilateral bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of the snowflake
    axes = plt.gca()
    axes.set_aspect('equal')

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
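# Quick check (illustrative): rotate(numpy.array([1, 0]), 90) is approximately
# numpy.array([0, 1]) -- a 90-degree counter-clockwise rotation; the 60-degree
# rotation above is what bends each Koch segment outward.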
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression over integers."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
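# Worked example (illustrative): evaluate_postfix(["2", "1", "+", "3", "*"]) == 9,
# i.e. (2 + 1) * 3; the division branch truncates toward zero for mixed-sign operands.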
if __name__ == "__main__":
import doctest
doctest.testmod()
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested')
    config.addinivalue_line(
        'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested')
    config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule')


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; report success instead.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
lowerCAmelCase__ = doctest.register_optionflag("""IGNORE_RESULT""")
lowerCAmelCase__ = doctest.OutputChecker
class lowercase ( _lowercase ):
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case , __snake_case):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __snake_case , __snake_case , __snake_case)
lowerCAmelCase__ = CustomOutputChecker
lowerCAmelCase__ = HfDoctestModule
lowerCAmelCase__ = HfDocTestParser
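# Usage sketch (added for illustration): with the custom output checker above,
# a doctest can opt out of comparing its output via the registered flag, e.g.
#
#     >>> print(some_nondeterministic_value)  # doctest: +IGNORE_RESULT
#     anything-printed-here-is-accepted
#
# The directive name follows from the "IGNORE_RESULT" string passed to
# doctest.register_optionflag above.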
| 648
| 0
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case):
_UpperCamelCase : int = question_encoder
_UpperCamelCase : int = generator
_UpperCamelCase : Optional[int] = self.question_encoder
def A__ ( self , __snake_case):
if os.path.isfile(UpperCamelCase__):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''')
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__)
_UpperCamelCase : Optional[Any] = os.path.join(UpperCamelCase__ , 'question_encoder_tokenizer')
_UpperCamelCase : List[Any] = os.path.join(UpperCamelCase__ , 'generator_tokenizer')
self.question_encoder.save_pretrained(UpperCamelCase__)
self.generator.save_pretrained(UpperCamelCase__)
@classmethod
def A__ ( cls , __snake_case , **__snake_case):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_UpperCamelCase : int = kwargs.pop('config' , UpperCamelCase__)
if config is None:
_UpperCamelCase : Any = RagConfig.from_pretrained(UpperCamelCase__)
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(
UpperCamelCase__ , config=config.question_encoder , subfolder='question_encoder_tokenizer')
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(
UpperCamelCase__ , config=config.generator , subfolder='generator_tokenizer')
return cls(question_encoder=UpperCamelCase__ , generator=UpperCamelCase__)
def __call__( self , *__snake_case , **__snake_case):
return self.current_tokenizer(*UpperCamelCase__ , **UpperCamelCase__)
def A__ ( self , *__snake_case , **__snake_case):
return self.generator.batch_decode(*UpperCamelCase__ , **UpperCamelCase__)
def A__ ( self , *__snake_case , **__snake_case):
return self.generator.decode(*UpperCamelCase__ , **UpperCamelCase__)
def A__ ( self):
_UpperCamelCase : Dict = self.question_encoder
def A__ ( self):
_UpperCamelCase : Optional[int] = self.generator
def A__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = "longest" , __snake_case = None , __snake_case = True , **__snake_case , ):
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `as_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , UpperCamelCase__ , )
if max_length is None:
_UpperCamelCase : str = self.current_tokenizer.model_max_length
_UpperCamelCase : Any = self(
UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , max_length=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , **UpperCamelCase__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_UpperCamelCase : List[Any] = self.current_tokenizer.model_max_length
_UpperCamelCase : Dict = self(
text_target=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , **UpperCamelCase__ , )
_UpperCamelCase : Any = labels['''input_ids''']
return model_inputs
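# Usage sketch (added for illustration; "facebook/rag-token-base" is an assumed
# checkpoint name). In upstream Transformers this class is RagTokenizer, whose
# `from_pretrained` / `batch_decode` correspond to the `A__` methods above:
#
#     rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#     inputs = rag_tokenizer("who holds the record in 100m freestyle?")
#     # __call__ delegates to `current_tokenizer`, which starts out as the
#     # question-encoder tokenizer; decoding delegates to the generator tokenizer.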
| 711
|
lowerCAmelCase__ = range(2, 2_0 + 1)
lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) )
_UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) )
_UpperCamelCase , _UpperCamelCase : Dict = 0, 0
_UpperCamelCase : Optional[int] = n - i
_UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ )
if sub_memo is not None:
_UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ )
if jumps is not None and len(UpperCAmelCase_ ) > 0:
# find and make the largest jump without going over
_UpperCamelCase : str = -1
for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_UpperCamelCase : Optional[Any] = _k
break
if max_jump >= 0:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
_UpperCamelCase : Tuple = diff + c
for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ):
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
if new_c > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
_UpperCamelCase : Union[str, Any] = []
else:
_UpperCamelCase : List[Any] = {c: []}
_UpperCamelCase : Optional[int] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_UpperCamelCase , _UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_UpperCamelCase , _UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
_UpperCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_UpperCamelCase : Union[str, Any] = 0
while j < len(UpperCAmelCase_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) )
return (diff, dn)
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(UpperCAmelCase_ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_UpperCamelCase : Any = i
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = 0, 0, 0
for j in range(len(UpperCAmelCase_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_UpperCamelCase : Union[str, Any] = ds_c + ds_b
diff += addend
_UpperCamelCase : Union[str, Any] = 0
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = a_i[j] + addend
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return diff, i - start_i
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ):
_UpperCamelCase : List[str] = digits[j] + addend
if s >= 1_0:
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
_UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient
else:
_UpperCamelCase : Dict = s
_UpperCamelCase : Optional[Any] = addend // 1_0
if addend == 0:
break
while addend > 0:
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
digits.append(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = [1]
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : int = 0
while True:
_UpperCamelCase , _UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ )
dn += terms_jumped
if dn == n - i:
break
_UpperCamelCase : str = 0
for j in range(len(UpperCAmelCase_ ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(f'{solution() = }')
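# Brute-force cross-check sketch (added; the helper name is hypothetical). The
# sequence computed above is a(1) = 1, a(n + 1) = a(n) + digitsum(a(n)); for
# small n a direct loop can validate the jump-table implementation.
def _brute_force_a_n(n: int) -> int:
    value = 1
    for _ in range(n - 1):
        value += sum(int(digit) for digit in str(value))
    return value


# _brute_force_a_n(k) should agree with the solver above for small k, e.g. k = 10.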
| 648
| 0
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
with open(lowerCamelCase_ ) as metadata_file:
_UpperCamelCase : int = json.load(lowerCamelCase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowerCamelCase_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : List[Any] = torch.load(lowerCamelCase_ , map_location='cpu' )
# Load the entity vocab file
_UpperCamelCase : Dict = load_entity_vocab(lowerCamelCase_ )
_UpperCamelCase : Tuple = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : int = AddedToken('<ent>' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
_UpperCamelCase : List[str] = AddedToken('<ent2>' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_b]} )  # the two distinct AddedToken objects defined above
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase : Any = LukeTokenizer.from_pretrained(lowerCamelCase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : Tuple = state_dict['embeddings.word_embeddings.weight']
_UpperCamelCase : Optional[int] = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
_UpperCamelCase : Optional[Any] = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
_UpperCamelCase : Optional[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Optional[int] = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCamelCase : Optional[int] = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Union[str, Any] = state_dict['entity_embeddings.entity_embeddings.weight']
_UpperCamelCase : str = entity_emb[entity_vocab['[MASK]']]
_UpperCamelCase : Any = LukeModel(config=lowerCamelCase_ ).eval()
_UpperCamelCase , _UpperCamelCase : int = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
if not (len(lowerCamelCase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'''Missing keys {", ".join(lowerCamelCase_ )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
_UpperCamelCase : int = LukeTokenizer.from_pretrained(lowerCamelCase_ , task='entity_classification' )
_UpperCamelCase : Optional[int] = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
_UpperCamelCase : List[str] = (3_9, 4_2)
_UpperCamelCase : Tuple = tokenizer(lowerCamelCase_ , entity_spans=[span] , add_prefix_space=lowerCamelCase_ , return_tensors='pt' )
_UpperCamelCase : Dict = model(**lowerCamelCase_ )
# Verify word hidden states
if model_size == "large":
_UpperCamelCase : Optional[Any] = torch.Size((1, 4_2, 1_0_2_4) )
_UpperCamelCase : Optional[Any] = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 4_2, 7_6_8) )
_UpperCamelCase : List[str] = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCamelCase : str = torch.Size((1, 1, 1_0_2_4) )
_UpperCamelCase : Union[str, Any] = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 7_6_8) )
_UpperCamelCase : Tuple = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(lowerCamelCase_ ) )
model.save_pretrained(lowerCamelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Dict = {}
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(lowerCamelCase_ ):
_UpperCamelCase , _UpperCamelCase : Optional[Any] = line.rstrip().split('\t' )
_UpperCamelCase : Optional[int] = index
return entity_vocab
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
lowerCAmelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
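# Example invocation sketch (added; the script name and all paths below are
# placeholders, not taken from this file):
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path /path/to/pytorch_model.bin \
#       --metadata_path /path/to/metadata.json \
#       --entity_vocab_path /path/to/entity_vocab.tsv \
#       --pytorch_dump_folder_path /path/to/converted_model \
#       --model_size base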
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "vit_mae"
def __init__( self , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_24 , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=16 , __snake_case=5_12 , __snake_case=8 , __snake_case=20_48 , __snake_case=0.7_5 , __snake_case=False , **__snake_case , ):
super().__init__(**__snake_case)
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : int = image_size
_UpperCamelCase : Any = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = qkv_bias
_UpperCamelCase : str = decoder_num_attention_heads
_UpperCamelCase : Union[str, Any] = decoder_hidden_size
_UpperCamelCase : Union[str, Any] = decoder_num_hidden_layers
_UpperCamelCase : Any = decoder_intermediate_size
_UpperCamelCase : int = mask_ratio
_UpperCamelCase : List[Any] = norm_pix_loss
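# Illustration (added): how the default hyper-parameters above interact in the
# MAE masking scheme. With image_size=224, patch_size=16 and mask_ratio=0.75,
# the encoder only ever sees a quarter of the patch sequence.
_num_patches = (224 // 16) ** 2            # 14 * 14 = 196 patches
_num_masked = int(0.75 * _num_patches)     # 147 patches hidden from the encoder
_num_visible = _num_patches - _num_masked  # 49 patches actually encoded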
| 648
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=False , __snake_case=False , __snake_case=2 , __snake_case=99 , __snake_case=0 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=2 , __snake_case=4 , __snake_case="last" , __snake_case=True , __snake_case=None , __snake_case=0 , ):
_UpperCamelCase : int = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : Union[str, Any] = seq_length
_UpperCamelCase : Optional[int] = is_training
_UpperCamelCase : Any = use_input_lengths
_UpperCamelCase : Any = use_token_type_ids
_UpperCamelCase : List[Any] = use_labels
_UpperCamelCase : List[Any] = gelu_activation
_UpperCamelCase : Union[str, Any] = sinusoidal_embeddings
_UpperCamelCase : List[str] = causal
_UpperCamelCase : str = asm
_UpperCamelCase : Union[str, Any] = n_langs
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Optional[Any] = n_special
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : List[Any] = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_sequence_label_size
_UpperCamelCase : Optional[int] = initializer_range
_UpperCamelCase : List[str] = num_labels
_UpperCamelCase : str = num_choices
_UpperCamelCase : Optional[Any] = summary_type
_UpperCamelCase : List[Any] = use_proj
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Any = bos_token_id
def A__ ( self):
_UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : str = None
if self.use_input_lengths:
_UpperCamelCase : Dict = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : List[str] = None
if self.use_labels:
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , 2).float()
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def A__ ( self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase : Dict = XLMModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , lengths=__snake_case , langs=__snake_case)
_UpperCamelCase : Any = model(__snake_case , langs=__snake_case)
_UpperCamelCase : str = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase : Dict = XLMWithLMHeadModel(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase : Any = XLMForQuestionAnsweringSimple(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
_UpperCamelCase : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case)
_UpperCamelCase : List[str] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase : List[str] = XLMForQuestionAnswering(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case)
_UpperCamelCase : Union[str, Any] = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , )
_UpperCamelCase : Any = model(
__snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , )
        (_UpperCamelCase ,) : str = result_with_labels.to_tuple()
_UpperCamelCase : Any = model(__snake_case , start_positions=__snake_case , end_positions=__snake_case)
        (_UpperCamelCase ,) : List[str] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase : List[str] = XLMForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case)
_UpperCamelCase : int = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : List[str] = XLMForTokenClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : int = model(__snake_case , attention_mask=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase : str = self.num_choices
_UpperCamelCase : Union[str, Any] = XLMForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[str] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[str] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[int] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class lowercase ( _A , _A , _A , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
a__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a__ = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : Tuple = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_UpperCamelCase : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
_UpperCamelCase : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = XLMModelTester(self)
_UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=__snake_case , emb_dim=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case)
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1):
self.assertIsInstance(__snake_case , __snake_case)
self.assertListEqual(
[isinstance(__snake_case , __snake_case) for iter_attentions in attentions] , [True] * len(__snake_case))
self.assertEqual(len(__snake_case) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(__snake_case):
# adds PAD dummy token
_UpperCamelCase : List[str] = min_length + idx + 1
_UpperCamelCase : Dict = min_length + idx + 1
_UpperCamelCase : List[str] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1):
self.assertIsInstance(__snake_case , __snake_case)
self.assertListEqual(
[isinstance(__snake_case , __snake_case) for iter_hidden_states in hidden_states] , [True] * len(__snake_case) , )
self.assertEqual(len(__snake_case) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(__snake_case):
# adds PAD dummy token
_UpperCamelCase : Dict = min_length + idx + 1
_UpperCamelCase : str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case) , )
pass
@slow
def A__ ( self):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : str = XLMModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self):
_UpperCamelCase : Optional[int] = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(__snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[14, 4_47]] , dtype=torch.long , device=__snake_case) # the president
_UpperCamelCase : str = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_UpperCamelCase : Optional[Any] = model.generate(__snake_case , do_sample=__snake_case)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case)
| 713
|
import functools
def lowerCamelCase_ ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] ) -> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase_ ) != 3 or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase_ ) == 0:
return 0
if min(UpperCAmelCase_ ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase_ ) >= 3_6_6:
raise ValueError('All days elements should be less than 366' )
_UpperCamelCase : Union[str, Any] = set(UpperCAmelCase_ )
@functools.cache
def dynamic_programming(UpperCAmelCase_ : int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
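# Worked example (added for illustration): travel days [1, 4, 6, 7, 8, 20] with
# costs [2, 7, 15] for 1-day, 7-day and 30-day passes. A 1-day pass on day 1,
# a 7-day pass covering days 4-10 and a 1-day pass on day 20 total
# 2 + 7 + 2 = 11, which is what the memoised recursion above returns:
#
#     lowerCamelCase_([1, 4, 6, 7, 8, 20], [2, 7, 15])  # -> 11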
| 648
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class lowercase ( __lowercase ):
"""simple docstring"""
a__ = 'deberta-v2'
def __init__( self , __snake_case=12_81_00 , __snake_case=15_36 , __snake_case=24 , __snake_case=24 , __snake_case=61_44 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=0 , __snake_case=0.0_2 , __snake_case=1e-7 , __snake_case=False , __snake_case=-1 , __snake_case=0 , __snake_case=True , __snake_case=None , __snake_case=0 , __snake_case="gelu" , **__snake_case , ):
super().__init__(**__A)
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : List[Any] = max_position_embeddings
_UpperCamelCase : Optional[int] = type_vocab_size
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : Any = relative_attention
_UpperCamelCase : Union[str, Any] = max_relative_positions
_UpperCamelCase : List[Any] = pad_token_id
_UpperCamelCase : List[str] = position_biased_input
# Backwards compatibility
if type(__A) == str:
_UpperCamelCase : List[Any] = [x.strip() for x in pos_att_type.lower().split('|')]
_UpperCamelCase : Tuple = pos_att_type
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : int = layer_norm_eps
_UpperCamelCase : Optional[int] = kwargs.get('pooler_hidden_size' , __A)
_UpperCamelCase : List[Any] = pooler_dropout
_UpperCamelCase : Optional[int] = pooler_hidden_act
class lowercase ( __lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : List[str] = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
@property
def A__ ( self):
return 12
def A__ ( self , __snake_case , __snake_case = -1 , __snake_case = -1 , __snake_case = -1 , __snake_case = False , __snake_case = None , __snake_case = 3 , __snake_case = 40 , __snake_case = 40 , __snake_case = None , ):
_UpperCamelCase : List[str] = super().generate_dummy_inputs(preprocessor=__A , framework=__A)
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
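# Illustration (added): the dynamic-axes mapping the OnnxConfig above produces
# for a non multiple-choice task when type_vocab_size == 0, written out
# literally. Axis 0 is the batch dimension and axis 1 the sequence dimension.
from collections import OrderedDict as _OrderedDict

_expected_onnx_inputs = _OrderedDict(
    [("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"})]
)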
| 714
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
return torch.tensor(
UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
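# Illustration (added): the element-wise closeness check used above, in
# isolation. With rel_tol and abs_tol both set to the module-level tolerance
# (1e-4), values are accepted when they differ by at most 1e-4, absolutely or
# relatively, whichever bound is looser.
import math as _math

assert _math.isclose(-0.6040, -0.60401, rel_tol=1e-4, abs_tol=1e-4)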
| 648
| 0
|
from __future__ import annotations
def lowerCamelCase_ ( UpperCAmelCase_ : list[int | float] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> Dict:
'''simple docstring'''
if len(UpperCamelCase__ ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(UpperCamelCase__ )
or left < -len(UpperCamelCase__ )
or right >= len(UpperCamelCase__ )
or right < -len(UpperCamelCase__ )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
_UpperCamelCase : Dict = (left + right) >> 1 # the middle
_UpperCamelCase : Tuple = find_max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # find max in range[left, mid]
_UpperCamelCase : Any = find_max(UpperCamelCase__ , mid + 1 , UpperCamelCase__ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
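# Illustration (added; the helper name is hypothetical): a linear scan gives
# the same answer as the divide-and-conquer recursion above and can serve as a
# reference implementation when testing it.
def _find_max_reference(nums: list[float]) -> float:
    best = nums[0]
    for value in nums[1:]:
        if value > best:
            best = value
    return best


assert _find_max_reference([3.0, 1.0, 4.0, 1.0, 5.0]) == 5.0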
| 715
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
    def A__ ( self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def A__ ( self , text):
        return self.sp_model.encode(text , out_type=str)
    def A__ ( self , token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return the unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def A__ ( self , index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def A__ ( self , tokens):
        # '▁' (SPIECE_UNDERLINE) marks word boundaries in SentencePiece pieces
        out_string = ''.join(tokens).replace('▁' , ' ').strip()
        return out_string
    def A__ ( self , save_directory , filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
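
# --- Illustration: the fairseq/spm id alignment in isolation -------------------
# A minimal self-contained sketch of the offset bookkeeping above, using a toy
# spm vocab instead of a real SentencePiece model (all names below are
# illustrative, not from a real checkpoint). The offset of 1 makes spm id 3
# (the first "real" piece, ",") land on fairseq id 4.
def _demo_fairseq_alignment():
    fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
    fairseq_offset = 1
    toy_spm = {'<unk>': 0, '<s>': 1, '</s>': 2, ',': 3, '.': 4, '▁': 5}

    def token_to_id(token):
        if token in fairseq_tokens_to_ids:
            return fairseq_tokens_to_ids[token]
        spm_id = toy_spm.get(token, 0)
        # spm returns 0 for unknown pieces -> map those to the fairseq <unk>
        return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']

    assert token_to_id('<pad>') == 1
    assert token_to_id(',') == 4            # first "real" token
    assert token_to_id('never-seen') == 3   # unknown -> <unk>

_demo_fairseq_alignment()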
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
_re_single_line_import = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : List[Any] = _re_backend.findall(_UpperCAmelCase )
if len(_UpperCAmelCase ) == 0:
return None
return "_and_".join(_UpperCAmelCase )
def lowerCamelCase_ ( ) -> List[str]:
'''simple docstring'''
with open(os.path.join(_UpperCAmelCase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
_UpperCamelCase : Optional[Any] = f.readlines()
# Get to the point we do the actual imports for type checking
_UpperCamelCase : List[str] = 0
_UpperCamelCase : str = {}
# Go through the end of the file
while line_index < len(_UpperCAmelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
_UpperCamelCase : List[Any] = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
_UpperCamelCase : Optional[Any] = []
# Until we unindent, add backend objects to the list
while line_index < len(_UpperCAmelCase ) and len(lines[line_index] ) > 1:
_UpperCamelCase : Tuple = lines[line_index]
_UpperCamelCase : str = _re_single_line_import.search(_UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_UpperCAmelCase ) > 0:
_UpperCamelCase : Tuple = objects
else:
line_index += 1
return backend_specific_objects
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple ) -> Tuple:
'''simple docstring'''
if name.isupper():
return DUMMY_CONSTANT.format(_UpperCAmelCase )
elif name.islower():
return DUMMY_FUNCTION.format(_UpperCAmelCase , _UpperCAmelCase )
else:
return DUMMY_CLASS.format(_UpperCAmelCase , _UpperCAmelCase )
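
# --- Illustration: what the templates above expand to ---------------------------
# A quick check of the class branch of create_dummy_object(); the model name and
# backend list below are illustrative only, not taken from this file.
_rendered_dummy = DUMMY_CLASS.format('UNet2DModel', '["torch"]')
assert 'class UNet2DModel(metaclass=DummyObject):' in _rendered_dummy
assert '_backends = ["torch"]' in _rendered_dummy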
def lowerCamelCase_ ( UpperCAmelCase_ : int=None ) -> Union[str, Any]:
'''simple docstring'''
if backend_specific_objects is None:
_UpperCamelCase : Union[str, Any] = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
_UpperCamelCase : Dict = {}
for backend, objects in backend_specific_objects.items():
_UpperCamelCase : int = '[' + ', '.join(F'''"{b}"''' for b in backend.split('_and_' ) ) + ']'
_UpperCamelCase : Union[str, Any] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_UpperCAmelCase , _UpperCAmelCase ) for o in objects] )
_UpperCamelCase : Tuple = dummy_file
return dummy_files
def lowerCamelCase_ ( UpperCAmelCase_ : List[str]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : List[str] = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
_UpperCamelCase : Any = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
_UpperCamelCase : int = os.path.join(_UpperCAmelCase , 'utils' )
_UpperCamelCase : Any = {
backend: os.path.join(_UpperCAmelCase , F'''dummy_{short_names.get(_UpperCAmelCase , _UpperCAmelCase )}_objects.py''' )
for backend in dummy_files.keys()
}
_UpperCamelCase : Optional[Any] = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_UpperCAmelCase ):
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
_UpperCamelCase : Dict = f.read()
else:
_UpperCamelCase : Optional[int] = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(_UpperCAmelCase , _UpperCAmelCase )}_objects.py as the main '''
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'''diffusers.utils.dummy_{short_names.get(_UpperCAmelCase , _UpperCAmelCase )}_objects.py. Run `make fix-copies` '''
'to fix this.' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
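
# --- Illustration: how __call__ merges its sub-outputs --------------------------
# A self-contained sketch of the dict-merging behaviour above, with plain dicts
# standing in for the real image-processor / feature-extractor outputs (the key
# names below are illustrative, not the exact model input names).
def _demo_output_merge():
    audio_dict = {'audio_values': [0.1, 0.2], 'audio_mask': [1, 1]}
    images_dict = {'pixel_values': [[0.5]], 'pixel_mask': [[1]]}
    output_dict = {}
    for part in (audio_dict, images_dict):
        if part is not None:
            output_dict.update(part)
    assert set(output_dict) == {'audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'}
    return output_dict

_demo_output_merge()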
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Dict = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
_UpperCamelCase : Optional[int] = emb.weight.shape
_UpperCamelCase : str = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_ )
_UpperCamelCase : int = emb.weight.data
return lin_layer
def lowerCamelCase_ ( state_dict , expert_idx=None ):
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0' , F'''ffn.experts.expert_{expert_idx}''')
            else:
                key = key.replace('moe_layer.experts.' , 'ffn.experts.expert_')
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier')
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.' , '.ffn.fc2.')
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.' , '.ffn.fc1.')
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.' , '.cross_attention.')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm')
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm' , 'ff_layer_norm')
        new_dict[key] = state_dict[old_key]
    return new_dict
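
# --- Illustration: example key renames ------------------------------------------
# A quick self-check of the rename scheme, executed here because the dump later
# rebinds the name `lowerCamelCase_` to other functions. The keys are
# representative fairseq parameter names; real checkpoints carry tensors, not
# the placeholder integers used below.
_demo_renamed = lowerCamelCase_(
    {
        'layers.1.moe_layer.experts.0.fc1.weight': 0,
        'layers.1.moe_layer.gate.wg.weight': 0,
        'layers.1.fc2.weight': 0,
        'layers.1.encoder_attn.k_proj.weight': 0,
    },
    expert_idx=0,
)
assert set(_demo_renamed) == {
    'layers.1.ffn.experts.expert_0.fc1.weight',
    'layers.1.ffn.router.classifier.weight',
    'layers.1.ffn.fc2.weight',
    'layers.1.cross_attention.k_proj.weight',
}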
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str = WEIGHTS_NAME ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Any = []
_UpperCamelCase : Optional[int] = 0
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
for expert in range(UpperCAmelCase_ ):
_UpperCamelCase : int = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = torch.load(UpperCAmelCase_ )['model']
remove_ignore_keys_(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = rename_fairseq_keys(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = os.path.join(
UpperCAmelCase_ , weights_name.replace('.bin' , F'''-{len(UpperCAmelCase_ )+1:05d}-of-???.bin''' ) )
torch.save(UpperCAmelCase_ , UpperCAmelCase_ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(UpperCAmelCase_ )[0]].dtype )
# Add the last block
_UpperCamelCase : Dict = os.path.join(UpperCAmelCase_ , weights_name.replace('.bin' , F'''-{len(UpperCAmelCase_ )+1:05d}-of-???.bin''' ) )
_UpperCamelCase : List[str] = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = rename_fairseq_keys(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(UpperCAmelCase_ ) == 1:
_UpperCamelCase : List[str] = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
torch.save(UpperCAmelCase_ , UpperCAmelCase_ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(UpperCAmelCase_ , UpperCAmelCase_ )
# Otherwise, let's build the index
_UpperCamelCase : Union[str, Any] = {}
for idx, shard in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : Tuple = weights_name.replace('.bin' , F'''-{idx+1:05d}-of-{len(UpperCAmelCase_ ):05d}.bin''' )
_UpperCamelCase : Dict = os.path.join(UpperCAmelCase_ , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) )
for key in shard:
_UpperCamelCase : Optional[Any] = shard_file
# Add the metadata
_UpperCamelCase : Tuple = {'total_size': total_size}
_UpperCamelCase : Dict = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'w' , encoding='utf-8' ) as f:
_UpperCamelCase : int = json.dumps(UpperCAmelCase_ , indent=2 , sort_keys=UpperCAmelCase_ ) + '\n'
f.write(UpperCAmelCase_ )
return metadata, index
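
# --- Illustration: shape of the generated index ----------------------------------
# A sketch of the index json the function above writes; the file names and the
# size below are made up for illustration, only the structure is meaningful.
_EXAMPLE_INDEX = {
    "metadata": {"total_size": 123_456_789},
    "weight_map": {
        "encoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
    },
}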
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ , lowerCAmelCase__ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
lowerCAmelCase__ = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCAmelCase__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
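
# --- Illustration: derived defaults -----------------------------------------------
# A minimal self-contained sketch of how the config above derives
# attention_hidden_size and intermediate_size from hidden_size when they are
# left unset (the 4096 value is just an example).
def _demo_derived_sizes(hidden_size=4096, attention_hidden_size=None, intermediate_size=None):
    attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
    intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
    return attention_hidden_size, intermediate_size

assert _demo_derived_sizes() == (4096, 16384)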
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "summarization"
a__ = ["loss"]
a__ = ROUGE_KEYS
a__ = "rouge2"
def __init__( self , __snake_case , **__snake_case):
if hparams.sortish_sampler and hparams.gpus > 1:
_UpperCamelCase : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(__A , num_labels=__A , mode=self.mode , **__A)
use_task_specific_params(self.model , 'summarization')
save_git_info(self.hparams.output_dir)
_UpperCamelCase : Optional[int] = Path(self.output_dir) / 'metrics.json'
_UpperCamelCase : Any = Path(self.output_dir) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path)
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Any = defaultdict(__A)
_UpperCamelCase : int = self.config.model_type
_UpperCamelCase : Optional[int] = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
_UpperCamelCase : List[Any] = {
'data_dir': self.hparams.data_dir,
'max_source_length': self.hparams.max_source_length,
'prefix': self.model.config.prefix or '',
}
_UpperCamelCase : Dict = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
_UpperCamelCase : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_UpperCamelCase : Optional[int] = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
_UpperCamelCase : int = get_git_info()['repo_sha']
_UpperCamelCase : Optional[int] = hparams.num_workers
_UpperCamelCase : List[str] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __A):
_UpperCamelCase : Union[str, Any] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_UpperCamelCase : Union[str, Any] = self.decoder_start_token_id
_UpperCamelCase : Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch') else LegacySeqaSeqDataset
)
_UpperCamelCase : Any = False
_UpperCamelCase : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_UpperCamelCase : str = self.hparams.eval_max_gen_length
else:
_UpperCamelCase : int = self.model.config.max_length
_UpperCamelCase : Dict = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def A__ ( self , __snake_case):
_UpperCamelCase : Dict = {
k: self.tokenizer.batch_decode(v.tolist()) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(__A , Path(self.output_dir) / 'text_batch.json')
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir) / 'tok_batch.json')
_UpperCamelCase : Dict = True
return readable_batch
def A__ ( self , __snake_case , **__snake_case):
return self.model(__A , **__A)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.tokenizer.batch_decode(
__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A)
return lmap(str.strip , __A)
def A__ ( self , __snake_case):
_UpperCamelCase : Optional[int] = self.tokenizer.pad_token_id
_UpperCamelCase , _UpperCamelCase : Any = batch['input_ids'], batch['attention_mask']
_UpperCamelCase : Optional[int] = batch['labels']
if isinstance(self.model , __A):
_UpperCamelCase : str = self.model._shift_right(__A)
else:
_UpperCamelCase : Optional[int] = shift_tokens_right(__A , __A)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_UpperCamelCase : List[Any] = decoder_input_ids
self.save_readable_batch(__A)
_UpperCamelCase : Dict = self(__A , attention_mask=__A , decoder_input_ids=__A , use_cache=__A)
_UpperCamelCase : Optional[Any] = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_UpperCamelCase : str = nn.CrossEntropyLoss(ignore_index=__A)
assert lm_logits.shape[-1] == self.vocab_size
_UpperCamelCase : Tuple = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1]) , tgt_ids.view(-1))
else:
_UpperCamelCase : List[Any] = nn.functional.log_softmax(__A , dim=-1)
_UpperCamelCase , _UpperCamelCase : Optional[int] = label_smoothed_nll_loss(
__A , __A , self.hparams.label_smoothing , ignore_index=__A)
return (loss,)
@property
def A__ ( self):
return self.tokenizer.pad_token_id
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = self._step(__A)
_UpperCamelCase : List[str] = dict(zip(self.loss_names , __A))
# tokens per batch
_UpperCamelCase : int = batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum()
_UpperCamelCase : List[Any] = batch['input_ids'].shape[0]
_UpperCamelCase : Union[str, Any] = batch['input_ids'].eq(self.pad).sum()
_UpperCamelCase : Tuple = batch['input_ids'].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def A__ ( self , __snake_case , __snake_case):
return self._generative_step(__A)
def A__ ( self , __snake_case , __snake_case="val"):
self.step_count += 1
_UpperCamelCase : List[Any] = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
_UpperCamelCase : Optional[int] = losses['loss']
_UpperCamelCase : Optional[int] = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
_UpperCamelCase : int = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_UpperCamelCase : int = torch.tensor(__A).type_as(__A)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(__A)
_UpperCamelCase : int = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
_UpperCamelCase : List[str] = self.step_count
self.metrics[prefix].append(__A) # callback writes this to self.metrics_save_path
_UpperCamelCase : Any = flatten_list([x['preds'] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def A__ ( self , __snake_case , __snake_case):
return calculate_rouge(__A , __A)
def A__ ( self , __snake_case):
_UpperCamelCase : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_UpperCamelCase : List[str] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=__A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_UpperCamelCase : List[Any] = (time.time() - ta) / batch['input_ids'].shape[0]
_UpperCamelCase : List[Any] = self.ids_to_clean_text(__A)
_UpperCamelCase : Union[str, Any] = self.ids_to_clean_text(batch['labels'])
_UpperCamelCase : List[str] = self._step(__A)
_UpperCamelCase : Union[str, Any] = dict(zip(self.loss_names , __A))
_UpperCamelCase : Optional[int] = self.calc_generative_metrics(__A , __A)
_UpperCamelCase : List[Any] = np.mean(lmap(__A , __A))
base_metrics.update(gen_time=__A , gen_len=__A , preds=__A , target=__A , **__A)
return base_metrics
def A__ ( self , __snake_case , __snake_case):
return self._generative_step(__A)
def A__ ( self , __snake_case):
return self.validation_epoch_end(__A , prefix='test')
def A__ ( self , __snake_case):
_UpperCamelCase : Union[str, Any] = self.n_obs[type_path]
_UpperCamelCase : List[Any] = self.target_lens[type_path]
_UpperCamelCase : str = self.dataset_class(
self.tokenizer , type_path=__A , n_obs=__A , max_target_length=__A , **self.dataset_kwargs , )
return dataset
def A__ ( self , __snake_case , __snake_case , __snake_case = False):
_UpperCamelCase : Dict = self.get_dataset(__A)
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_UpperCamelCase : int = dataset.make_sortish_sampler(__A , distributed=self.hparams.gpus > 1)
return DataLoader(
__A , batch_size=__A , collate_fn=dataset.collate_fn , shuffle=__A , num_workers=self.num_workers , sampler=__A , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_UpperCamelCase : Union[str, Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1)
return DataLoader(
__A , batch_sampler=__A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__A , batch_size=__A , collate_fn=dataset.collate_fn , shuffle=__A , num_workers=self.num_workers , sampler=__A , )
def A__ ( self):
_UpperCamelCase : Any = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=__A)
return dataloader
def A__ ( self):
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size)
def A__ ( self):
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size)
@staticmethod
def A__ ( __snake_case , __snake_case):
BaseTransformer.add_model_specific_args(__A , __A)
add_generic_args(__A , __A)
parser.add_argument(
'--max_source_length' , default=10_24 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
        parser.add_argument(
            '--max_target_length' , default=56 , type=__A , help=(
                'The maximum total output (target) sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=1_42 , type=__A , help=(
                'The maximum total output (target) sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=1_42 , type=__A , help=(
                'The maximum total output (target) sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
parser.add_argument('--freeze_encoder' , action='store_true')
parser.add_argument('--freeze_embeds' , action='store_true')
parser.add_argument('--sortish_sampler' , action='store_true' , default=__A)
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=__A)
parser.add_argument('--max_tokens_per_batch' , type=__A , default=__A)
parser.add_argument('--logger_name' , type=__A , choices=['default', 'wandb', 'wandb_shared'] , default='default')
parser.add_argument('--n_train' , type=__A , default=-1 , required=__A , help='# examples. -1 means use all.')
parser.add_argument('--n_val' , type=__A , default=5_00 , required=__A , help='# examples. -1 means use all.')
parser.add_argument('--n_test' , type=__A , default=-1 , required=__A , help='# examples. -1 means use all.')
        parser.add_argument(
            '--task' , type=__A , default='summarization' , required=__A , help='Task to run: summarization or translation.')
parser.add_argument('--label_smoothing' , type=__A , default=0.0 , required=__A)
parser.add_argument('--src_lang' , type=__A , default='' , required=__A)
parser.add_argument('--tgt_lang' , type=__A , default='' , required=__A)
parser.add_argument('--eval_beams' , type=__A , default=__A , required=__A)
parser.add_argument(
'--val_metric' , type=__A , default=__A , required=__A , choices=['bleu', 'rouge2', 'loss', None])
parser.add_argument('--eval_max_gen_length' , type=__A , default=__A , help='never generate more than n tokens')
parser.add_argument('--save_top_k' , type=__A , default=1 , required=__A , help='How many checkpoints to save')
parser.add_argument(
'--early_stopping_patience' , type=__A , default=-1 , required=__A , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will affect it.'
) , )
return parser
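
# --- Illustration: label-smoothed NLL in isolation --------------------------------
# A minimal, self-contained sketch of the label-smoothing branch used in the
# _step method above. It follows the standard fairseq-style formula;
# ignore_index/padding masking is omitted for brevity, and the real helper
# lives in utils.label_smoothed_nll_loss.
def _demo_label_smoothed_nll(lprobs, target, epsilon):
    # lprobs: (N, vocab) log-probabilities, target: (N,) gold token ids
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.sum(dim=-1)
    eps_i = epsilon / lprobs.size(-1)
    return ((1.0 - epsilon) * nll_loss + eps_i * smooth_loss).mean()

# Uniform predictions over 5 classes give loss == ln(5) regardless of epsilon.
_demo_lprobs = torch.log_softmax(torch.zeros(2, 5), dim=-1)
_demo_target = torch.tensor([0, 3])
assert abs(_demo_label_smoothed_nll(_demo_lprobs, _demo_target, 0.1).item() - 1.6094) < 1e-3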
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "translation"
a__ = ["loss"]
a__ = ["bleu"]
a__ = "bleu"
def __init__( self , __snake_case , **__snake_case):
super().__init__(__A , **__A)
_UpperCamelCase : str = hparams.src_lang
_UpperCamelCase : Dict = hparams.tgt_lang
def A__ ( self , __snake_case , __snake_case):
return calculate_bleu(__A , __A)
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None ) -> str:
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=_lowercase )
check_output_dir(_lowercase , expected_items=3 )
if model is None:
if "summarization" in args.task:
_UpperCamelCase : Dict = SummarizationModule(_lowercase )
else:
_UpperCamelCase : int = TranslationModule(_lowercase )
_UpperCamelCase : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
_UpperCamelCase : Tuple = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_UpperCamelCase : Union[str, Any] = os.environ.get('WANDB_PROJECT' , _lowercase )
_UpperCamelCase : Any = WandbLogger(name=model.output_dir.name , project=_lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_UpperCamelCase : Dict = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
_UpperCamelCase : str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
_UpperCamelCase : int = False
_UpperCamelCase : Dict = args.val_metric == 'loss'
_UpperCamelCase : List[str] = generic_train(
_lowercase , _lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _lowercase ) , early_stopping_callback=_lowercase , logger=_lowercase , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
_UpperCamelCase : Optional[int] = ''
_UpperCamelCase : List[Any] = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=_lowercase ) )
if checkpoints:
_UpperCamelCase : Optional[Any] = checkpoints[-1]
_UpperCamelCase : Tuple = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
lowerCAmelCase__ = pl.Trainer.add_argparse_args(parser)
lowerCAmelCase__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCAmelCase__ = parser.parse_args()
main(args)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
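
# --- Illustration: the exported dynamic axes ---------------------------------------
# For the default (non multiple-choice) task, the property above resolves to
# these ONNX dynamic axes:
_EXPECTED_DEFAULT_ONNX_INPUTS = OrderedDict(
    [
        ('input_ids', {0: 'batch', 1: 'sequence'}),
        ('attention_mask', {0: 'batch', 1: 'sequence'}),
        ('token_type_ids', {0: 'batch', 1: 'sequence'}),
    ]
)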
import math
import sys
import cva
import numpy as np
def lowerCamelCase_ ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float ) -> int:
'''simple docstring'''
_UpperCamelCase : int = math.sqrt(_lowerCamelCase )
_UpperCamelCase : Any = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def lowerCamelCase_ ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Any = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : float ) -> Dict:
'''simple docstring'''
_UpperCamelCase : int = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _lowerCamelCase ):
for j in range(0 , _lowerCamelCase ):
_UpperCamelCase : Any = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : int , ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Tuple = np.zeros(img.shape )
_UpperCamelCase : Optional[int] = get_gauss_kernel(_lowerCamelCase , _lowerCamelCase )
_UpperCamelCase : int = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
_UpperCamelCase : List[str] = get_slice(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_UpperCamelCase : Any = img_s - img_s[kernel_size // 2, kernel_size // 2]
_UpperCamelCase : Optional[Any] = vec_gaussian(_lowerCamelCase , _lowerCamelCase )
_UpperCamelCase : Optional[Any] = np.multiply(_lowerCamelCase , _lowerCamelCase )
_UpperCamelCase : Any = np.multiply(_lowerCamelCase , _lowerCamelCase )
_UpperCamelCase : Tuple = np.sum(_lowerCamelCase ) / np.sum(_lowerCamelCase )
_UpperCamelCase : Optional[Any] = val
return imga
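
# --- Illustration: a 3x3 spatial kernel --------------------------------------------
# A self-contained check of the distance-based Gaussian kernel built above
# (re-implemented locally because the dump rebinds function names). The centre
# pixel should receive the largest weight.
def _demo_gauss_kernel(kernel_size=3, sigma=1.0):
    kernel = np.zeros((kernel_size, kernel_size))
    for i in range(kernel_size):
        for j in range(kernel_size):
            kernel[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    kernel = cons * np.exp(-((kernel / sigma) ** 2) * 0.5)
    assert kernel[kernel_size // 2, kernel_size // 2] == kernel.max()
    return kernel

_demo_gauss_kernel()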
def lowerCamelCase_ ( UpperCAmelCase_ : list ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = args[1] if args[1:] else """../image_data/lena.jpg"""
_UpperCamelCase : Union[str, Any] = float(args[2] ) if args[2:] else 1.0
_UpperCamelCase : Optional[int] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
_UpperCamelCase : Any = int(args[4] )
_UpperCamelCase : Any = kernel_size + abs(kernel_size % 2 - 1 )
else:
_UpperCamelCase : Optional[int] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parse_args(sys.argv)
lowerCAmelCase__ = cva.imread(filename, 0)
cva.imshow("""input image""", img)
lowerCAmelCase__ = img / 2_5_5
lowerCAmelCase__ = out.astype("""float32""")
lowerCAmelCase__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowerCAmelCase__ = out * 2_5_5
lowerCAmelCase__ = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
_UpperCamelCase : List[Any] = self.model.config
_UpperCamelCase : Optional[int] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail'):
_UpperCamelCase : Tuple = int(__snake_case)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = outputs.logits
_UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
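
# --- Illustration: label selection from NLI logits ---------------------------------
# A self-contained sketch of the decode step above: one premise/hypothesis pair
# is scored per candidate label, and the label whose pair gets the highest
# entailment logit wins. Column index 2 mirrors the hard-coded index in decode;
# the real class resolves the entailment id from config.id2label at setup time.
def _demo_pick_label():
    labels = ['positive', 'negative']
    logits = torch.tensor([[0.1, 0.2, 2.0],   # "This example is positive"
                           [0.3, 0.1, 0.5]])  # "This example is negative"
    label_id = torch.argmax(logits[:, 2]).item()
    assert labels[label_id] == 'positive'
    return labels[label_id]

_demo_pick_label()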
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowerCamelCase_ ( UpperCAmelCase_ : Dataset , UpperCAmelCase_ : Dict[str, str] ) -> List[str]:
_UpperCamelCase : int = args.log_outputs
_UpperCamelCase : str = """_""".join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
_UpperCamelCase : str = load_metric('wer' )
_UpperCamelCase : Optional[int] = load_metric('cer' )
# compute metrics
_UpperCamelCase : Dict = wer.compute(references=result['target'] , predictions=result['prediction'] )
_UpperCamelCase : Union[str, Any] = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
_UpperCamelCase : Union[str, Any] = F'''WER: {wer_result}\nCER: {cer_result}'''
print(lowercase_ )
with open(F'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(lowercase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
_UpperCamelCase : int = F'''log_{dataset_id}_predictions.txt'''
_UpperCamelCase : List[str] = F'''log_{dataset_id}_targets.txt'''
with open(lowercase_ , 'w' ) as p, open(lowercase_ , 'w' ) as t:
# mapping function to write output
def write_to_file(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] ):
p.write(F'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(F'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(lowercase_ , with_indices=lowercase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> str:
_UpperCamelCase : Optional[int] = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
_UpperCamelCase : int = re.sub(lowercase_ , '' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing newline characters etc...
# note that order is important here!
_UpperCamelCase : Optional[Any] = ["""\n\n""", """\n""", """ """, """ """]
for t in token_sequences_to_ignore:
_UpperCamelCase : Optional[Any] = """ """.join(text.split(lowercase_ ) )
return text
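
# --- Illustration -------------------------------------------------------------------
# A self-contained check of the normalisation above (re-implemented locally
# because the dump rebinds function names).
def _demo_normalize(text):
    text = re.sub(r'[,?.!\-\;\:\"“%‘”�—’…–]', '', text.lower())
    for seq in ['\n\n', '\n', '  ', '  ']:
        text = ' '.join(text.split(seq))
    return text

assert _demo_normalize('Hello, WORLD!\n').strip() == 'hello world'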
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Any:
# load dataset
_UpperCamelCase : Any = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
_UpperCamelCase : Tuple = AutoFeatureExtractor.from_pretrained(args.model_id )
_UpperCamelCase : List[Any] = feature_extractor.sampling_rate
# resample audio
_UpperCamelCase : List[str] = dataset.cast_column('audio' , Audio(sampling_rate=lowercase_ ) )
# load eval pipeline
if args.device is None:
_UpperCamelCase : List[Any] = 0 if torch.cuda.is_available() else -1
_UpperCamelCase : Optional[Any] = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(UpperCAmelCase_ : str ):
_UpperCamelCase : Optional[Any] = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
_UpperCamelCase : str = prediction["""text"""]
_UpperCamelCase : Dict = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
_UpperCamelCase : int = dataset.map(lowercase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowercase_ , lowercase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `\'en\'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `\'test\'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
lowerCAmelCase__ = parser.parse_args()
main(args)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
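
# --- Illustration: what the lazy module buys -----------------------------------------
# The structure above defers the heavy submodule imports until an attribute is
# actually requested. A minimal, self-contained sketch of the underlying idea
# (not the actual _LazyModule implementation):
def _demo_lazy_attr(name):
    import importlib
    registry = {'sqrt': ('math', 'sqrt')}
    module_name, attr = registry[name]
    return getattr(importlib.import_module(module_name), attr)

assert _demo_lazy_attr('sqrt')(9.0) == 3.0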
import os
def lowerCamelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
with open(os.path.dirname(lowerCAmelCase__ ) + '/grid.txt' ) as f:
_UpperCamelCase : Dict = [] # noqa: E741
for _ in range(2_0 ):
l.append([int(lowerCAmelCase__ ) for x in f.readline().split()] )
_UpperCamelCase : Any = 0
# right
for i in range(2_0 ):
for j in range(1_7 ):
_UpperCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
_UpperCamelCase : Any = temp
# down
for i in range(1_7 ):
for j in range(2_0 ):
_UpperCamelCase : List[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
_UpperCamelCase : str = temp
# diagonal 1
for i in range(1_7 ):
for j in range(1_7 ):
_UpperCamelCase : Union[str, Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
_UpperCamelCase : Optional[Any] = temp
# diagonal 2
for i in range(1_7 ):
for j in range(3 , 2_0 ):
_UpperCamelCase : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
_UpperCamelCase : Optional[int] = temp
return maximum
if __name__ == "__main__":
print(solution())
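
# --- Illustration: the scan on a toy grid --------------------------------------------
# A self-contained sketch of the four directional products computed above, on a
# 4x4 grid where each direction has exactly one 4-element window per row/column.
def _demo_max_product():
    g = [[1, 2, 3, 4],
         [2, 2, 2, 2],
         [3, 2, 9, 1],
         [4, 2, 1, 9]]
    best = 0
    for i in range(4):  # rows (right) and columns (down)
        best = max(best, g[i][0] * g[i][1] * g[i][2] * g[i][3])
        best = max(best, g[0][i] * g[1][i] * g[2][i] * g[3][i])
    best = max(best, g[0][0] * g[1][1] * g[2][2] * g[3][3])  # diagonal 1
    best = max(best, g[0][3] * g[1][2] * g[2][1] * g[3][0])  # diagonal 2
    return best

assert _demo_max_product() == 162  # diagonal 1: 1 * 2 * 9 * 9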
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase__ = 5
lowerCAmelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
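        # Setting `tgt_lang` above only swaps the language-code token that the
        # tokenizer forces at the start of each sequence; the underlying
        # vocabulary is unchanged.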
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( ProcessorMixin ):
"""simple docstring"""
a__ = """ClapFeatureExtractor"""
a__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , __snake_case , __snake_case):
super().__init__(__snake_case , __snake_case)
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case):
_UpperCamelCase : Optional[Any] = kwargs.pop('sampling_rate' , __snake_case)
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.')
if text is not None:
_UpperCamelCase : Union[str, Any] = self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case)
if audios is not None:
_UpperCamelCase : Union[str, Any] = self.feature_extractor(
__snake_case , sampling_rate=__snake_case , return_tensors=__snake_case , **__snake_case)
if text is not None and audios is not None:
_UpperCamelCase : Optional[int] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case) , tensor_type=__snake_case)
def A__ ( self , *__snake_case , **__snake_case):
return self.tokenizer.batch_decode(*__snake_case , **__snake_case)
def A__ ( self , *__snake_case , **__snake_case):
return self.tokenizer.decode(*__snake_case , **__snake_case)
@property
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCamelCase : Tuple = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
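# A minimal usage sketch for the processor defined above, assuming it is
# exposed as transformers' ClapProcessor; the checkpoint name and the dummy
# waveform are illustrative assumptions, not part of the original file.
#
#   from transformers import ClapProcessor
#   import numpy as np
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(
#       text=["a dog barking"],
#       audios=np.zeros(48_000, dtype=np.float32),
#       sampling_rate=48_000,
#       return_tensors="pt",
#   )  # token ids plus `input_features` in a single BatchEncoding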
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( PretrainedConfig ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
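# A short instantiation sketch for the config above. The placeholder
# assignments in `__init__` stand for `self.<name> = <name>`, so under that
# reading the class behaves like a regular BERT config plus the pruning
# fields, e.g.:
#
#   config = lowercase(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#   config.num_hidden_layers  # -> 12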
lowerCAmelCase__ = 8.31_44_62 # universal gas constant R, in J mol^-1 K^-1
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ) -> float:
'''simple docstring'''
    if moles < 0 or kelvin < 0 or volume <= 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ) -> float:
'''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure <= 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
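# Worked example for the second helper (volume from pressure), reading the
# placeholder signature as (moles, kelvin, pressure): one mole at 300 K and
# 101_325 Pa occupies V = nRT / P = 1 * 300 * 8.314462 / 101_325, about
# 0.0246 m^3. Note that both helpers share the obfuscated name
# `lowerCamelCase_`, so only the later, pressure-based definition would be
# reachable at runtime.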
if __name__ == "__main__":
from doctest import testmod
testmod()
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
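        # Both runs consumed identical seeded batches, so the DDPM and DDIM
        # noisy images and the final noise predictions (hidden behind the
        # placeholders below) should agree to within atol=1e-5.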
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
_UpperCamelCase : Dict = mock.Mock()
_UpperCamelCase : List[str] = 5_00
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Union[str, Any] = HTTPError
_UpperCamelCase : Any = {}
# Download this model to make sure it's in the cache.
_UpperCamelCase : int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=__snake_case) as mock_head:
_UpperCamelCase : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A__ ( self):
_UpperCamelCase : Any = mock.Mock()
_UpperCamelCase : Dict = 5_00
_UpperCamelCase : str = {}
_UpperCamelCase : Tuple = HTTPError
_UpperCamelCase : Optional[int] = {}
# Download this model to make sure it's in the cache.
_UpperCamelCase : int = GPTaTokenizerFast.from_pretrained('gpt2')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=__snake_case) as mock_head:
_UpperCamelCase : Union[str, Any] = GPTaTokenizerFast.from_pretrained('gpt2')
            # This check ensures we did call the fake head request
mock_head.assert_called()
def A__ ( self):
try:
_UpperCamelCase : Dict = tempfile.mktemp()
with open(__snake_case , 'wb') as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , __snake_case)
_UpperCamelCase : Any = AlbertTokenizer.from_pretrained(__snake_case)
finally:
os.remove(__snake_case)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json'):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb') as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , __snake_case)
_UpperCamelCase : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json')
def A__ ( self):
_UpperCamelCase : Optional[int] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A__ ( cls):
_UpperCamelCase : str = TOKEN
HfFolder.save_token(__snake_case)
@classmethod
def A__ ( cls):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer')
except HTTPError:
pass
def A__ ( self):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : str = os.path.join(__snake_case , 'vocab.txt')
with open(__snake_case , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
_UpperCamelCase : List[Any] = BertTokenizer(__snake_case)
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token)
_UpperCamelCase : Optional[Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__snake_case , repo_id='test-tokenizer' , push_to_hub=__snake_case , use_auth_token=self._token)
_UpperCamelCase : List[Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
def A__ ( self):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : Union[str, Any] = os.path.join(__snake_case , 'vocab.txt')
with open(__snake_case , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
_UpperCamelCase : Optional[Any] = BertTokenizer(__snake_case)
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token)
_UpperCamelCase : Union[str, Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__snake_case , repo_id='valid_org/test-tokenizer-org' , push_to_hub=__snake_case , use_auth_token=self._token)
_UpperCamelCase : Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
@require_tokenizers
def A__ ( self):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : Dict = os.path.join(__snake_case , 'vocab.txt')
with open(__snake_case , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
_UpperCamelCase : List[str] = CustomTokenizer(__snake_case)
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
_UpperCamelCase : Any = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__snake_case)
        # Can't make an isinstance check because the tokenizer is built from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : int = os.path.join(__snake_case , 'vocab.txt')
with open(__snake_case , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
_UpperCamelCase : str = BertTokenizerFast.from_pretrained(__snake_case)
bert_tokenizer.save_pretrained(__snake_case)
_UpperCamelCase : Union[str, Any] = CustomTokenizerFast.from_pretrained(__snake_case)
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__snake_case)
        # Can't make an isinstance check because the tokenizer is built from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast')
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=__snake_case , trust_remote_code=__snake_case)
        # Can't make an isinstance check because the tokenizer is built from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
_UpperCamelCase : Union[str, Any] = Trie()
trie.add('Hello 友達')
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
trie.add('Hello')
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})
def A__ ( self):
_UpperCamelCase : Optional[Any] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS] This is a extra_id_100'])
trie.add('[CLS]')
trie.add('extra_id_1')
trie.add('extra_id_100')
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS]', ' This is a ', 'extra_id_100'])
def A__ ( self):
_UpperCamelCase : Union[str, Any] = Trie()
trie.add('A')
self.assertEqual(trie.split('ABC') , ['A', 'BC'])
self.assertEqual(trie.split('BCA') , ['BC', 'A'])
def A__ ( self):
_UpperCamelCase : int = Trie()
trie.add('TOKEN]')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def A__ ( self):
_UpperCamelCase : Any = Trie()
trie.add('A')
trie.add('P')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def A__ ( self):
_UpperCamelCase : str = Trie()
trie.add('AB')
trie.add('B')
trie.add('C')
self.assertEqual(trie.split('ABC') , ['AB', 'C'])
def A__ ( self):
_UpperCamelCase : Any = Trie()
trie.add('ABC')
trie.add('B')
trie.add('CD')
self.assertEqual(trie.split('ABCD') , ['ABC', 'D'])
def A__ ( self):
_UpperCamelCase : Any = Trie()
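        # cut_text slices "ABC" at the given offsets, skipping repeated
        # offsets and the out-of-order offset 1, so only "AB" and "C" survive.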
_UpperCamelCase : str = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3])
self.assertEqual(__snake_case , ['AB', 'C'])
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
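    # The placeholder targets below stand for the diffusers ResnetBlock2D keys:
    # in_layers.{0,2} map to norm1/conv1, emb_layers.1 to time_emb_proj,
    # out_layers.{0,3} to norm2/conv2, and skip_connection to conv_shortcut.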
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
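    # The original checkpoint stores q/k/v as one fused 1x1 convolution; it is
    # chunked into three below and the trailing conv dimensions are squeezed
    # away so the weights fit the diffusers attention's linear projections.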
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
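    # The running index into `input_blocks` starts at 1 because
    # input_blocks.0.0 (the stem convolution) was already copied above.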
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
from random import randint, random
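# The helpers below implement the Nagel-Schreckenberg cellular-automaton
# traffic model on a circular road: each step, every car accelerates by 1 up
# to max_speed, brakes down to the gap in front of it, randomly slows by 1
# with the given probability, and finally advances by its speed (modulo the
# highway length).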
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 5 , ) -> str:
'''simple docstring'''
_UpperCamelCase : List[Any] = [[-1] * number_of_cells] # Create a highway without any car
_UpperCamelCase : Optional[Any] = 0
_UpperCamelCase : int = max(__lowercase , 0 )
while i < number_of_cells:
_UpperCamelCase : Any = (
randint(0 , __lowercase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( UpperCAmelCase_ : list , UpperCAmelCase_ : int ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : str = highway_now[car_index + 1 :]
for cell in range(len(__lowercase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # If we reach this point, the car is near the end of the highway, so wrap around
return distance + get_distance(__lowercase , -1 )
def lowerCamelCase_ ( UpperCAmelCase_ : list , UpperCAmelCase_ : float , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
_UpperCamelCase : Any = len(__lowercase )
    # Before the calculations, the next state of the highway is empty
_UpperCamelCase : Tuple = [-1] * number_of_cells
for car_index in range(__lowercase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_UpperCamelCase : str = min(highway_now[car_index] + 1 , __lowercase )
            # Number of empty cells before the next car
_UpperCamelCase : Optional[Any] = get_distance(__lowercase , __lowercase ) - 1
# We can't have the car causing an accident
_UpperCamelCase : Union[str, Any] = min(next_highway[car_index] , __lowercase )
if random() < probability:
# Randomly, a driver will slow down
_UpperCamelCase : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase_ ( UpperCAmelCase_ : list , UpperCAmelCase_ : int , UpperCAmelCase_ : float , UpperCAmelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Dict = len(highway[0] )
for i in range(__lowercase ):
_UpperCamelCase : Any = update(highway[i] , __lowercase , __lowercase )
_UpperCamelCase : List[Any] = [-1] * number_of_cells
for car_index in range(__lowercase ):
_UpperCamelCase : List[Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_UpperCamelCase : Any = (car_index + speed) % number_of_cells
# Commit the change of position
_UpperCamelCase : Union[str, Any] = speed
highway.append(__lowercase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowerCamelCase_ ( UpperCAmelCase_ : list ) -> list:
'''simple docstring'''
if len(UpperCAmelCase_ ) <= 1:
return [tuple(UpperCAmelCase_ )]
_UpperCamelCase : List[Any] = []
def generate(UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
_UpperCamelCase : Optional[int] = [0] * n
res.append(tuple(UpperCAmelCase_ ) )
_UpperCamelCase : List[Any] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[0]
else:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[c[i]]
res.append(tuple(UpperCAmelCase_ ) )
c[i] += 1
_UpperCamelCase : Tuple = 0
else:
_UpperCamelCase : Tuple = 0
i += 1
generate(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return res
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
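    # For the input "1,2,3" this prints all 3! = 6 permutations produced by
    # Heap's algorithm (reading the placeholder pairs as the usual swaps):
    # [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]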
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase__ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : int ) -> Tuple:
'''simple docstring'''
inspect_dataset(__snake_case , __snake_case )
_UpperCamelCase : List[str] = path + '.py'
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> int:
'''simple docstring'''
inspect_metric(__snake_case , __snake_case )
_UpperCamelCase : Optional[Any] = path + '.py'
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : str = get_dataset_config_info(__snake_case , config_name=__snake_case )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
with pytest.raises(__snake_case ):
get_dataset_config_info(__snake_case , config_name=__snake_case )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any ) -> str:
'''simple docstring'''
_UpperCamelCase : int = get_dataset_config_names(__snake_case )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : str = get_dataset_infos(__snake_case )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase : int = expected_configs[0]
assert expected_config in infos
_UpperCamelCase : Optional[int] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] ) -> int:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = get_dataset_infos(__snake_case )
assert expected_config in infos
_UpperCamelCase : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
with pytest.raises(__snake_case ):
get_dataset_split_names(__snake_case , config_name=__snake_case )
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCamelCase : List[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if k.startswith('encoder' ):
_UpperCamelCase : Optional[Any] = k.replace('.attn' , '.self_attn' )
_UpperCamelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
_UpperCamelCase : Any = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm3' , 'final_layer_norm' )
return k
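# Example: the rename helper above maps "encoder.attention.q_lin.weight"
# -> "encoder.self_attn.q_proj.weight" via the PATTERNS table plus the
# encoder/decoder-specific layer-norm renames.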
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
_UpperCamelCase : Optional[int] = sd.pop(UpperCAmelCase_ )
_UpperCamelCase : str = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
_UpperCamelCase : Tuple = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : int = model['model']
_UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = m.model.state_dict().keys()
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCamelCase : int = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase_ )
m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
m.half()
m.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
lowerCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)