from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge; weights are restricted to 0 and 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the edges leaving the given vertex."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: zero-weight edges go to the front of the deque and
        one-weight edges to the back, so vertices are settled in distance
        order in O(V + E) time."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
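
A minimal usage sketch for the class above; the three-vertex graph is an illustrative assumption, not part of the original module:

graph = AdjacencyList(3)
graph.add_edge(0, 1, 0)  # zero-weight edge, relaxed via appendleft
graph.add_edge(0, 2, 1)
graph.add_edge(1, 2, 1)
assert graph.get_shortest_path(0, 2) == 1  # 0 -> 1 (cost 0) -> 2 (cost 1) ties the direct edge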
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)

                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards breaking which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    """Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
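
A short usage sketch for the processor; the checkpoint name is the standard public CLIP checkpoint, and the blank test image is an illustrative stand-in:

from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))  # placeholder image
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# inputs now carries input_ids, attention_mask and pixel_values together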
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test fixture."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
"""
The Fisher-Yates shuffle produces a random permutation of a finite sequence.
This variant repeatedly swaps two randomly chosen positions in place.
"""
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        # pick two random indexes and swap the values they hold
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply iteration_step the requested number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment with four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
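
The script expects a headerless `sample_data.csv` whose second column holds the target series (see the `df.iloc[:, 1:2]` access above). A hedged sketch for generating a synthetic stand-in file; the random-walk "price" column is an illustrative assumption, not the dataset the original author used:

import numpy as np
import pandas as pd

steps = np.arange(500)
prices = 100 + np.cumsum(np.random.randn(500))  # synthetic random-walk series
pd.DataFrame({"t": steps, "price": prices}).to_csv("sample_data.csv", header=False, index=False)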
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to float16 in place or to save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
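
Since `fire.Fire(convert)` exposes the function's parameters as command-line arguments, invocation looks like the sketch below; the script filename is an assumption, not given in the source:

# python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path overwrites the input checkpoint in place.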
"""
Project Euler Problem 551: Sum of Digits Sequence
https://projecteuler.net/problem=551

The sequence begins 1, 2, 4, 8, 16, 23, ...: each term is the previous term
plus its digit sum. The solver below writes each term as b * 10^k + c and
caches "jumps" keyed on digitsum(b) and c so that huge stretches of the
sequence can be skipped at once.
"""

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[tuple[int, int, int]]]] = {}


def next_term(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """Advance a_i (digits, least significant first) as far as possible with
    cached jumps, returning (total increase, number of terms jumped)."""
    # a_i = b * 10^k + c; ds_b is digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """Compute terms sequentially until the low k digits carry, or until term n."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits: list[int], k: int, addend: int) -> None:
    """Add addend into digits starting at position k, propagating carries."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
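
A quick cross-check sketch against a direct simulation; the helper below is not part of the original file, and it mirrors solution's term counting, which treats the initial 1 as the first term:

def _brute_force(n: int) -> int:
    # apply "add the digit sum" n - 1 times starting from 1
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term


assert solution(100) == _brute_force(100)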
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MAE model."""

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
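
A small usage sketch following the standard PretrainedConfig pattern; the mask_ratio override is an arbitrary illustrative value:

from transformers import ViTMAEConfig, ViTMAEModel

config = ViTMAEConfig(mask_ratio=0.6)  # override one default, keep the rest
model = ViTMAEModel(config)  # randomly initialized, no weights downloaded
assert model.config.mask_ratio == 0.6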
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """A Krishnamurthy number equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum total cost to travel on the given days of the year, where costs
    holds the prices of a 1-day, 7-day and 30-day pass respectively."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
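
A usage sketch with the classic sample instance for this problem; the days/costs values below are the well-known example, not from the original file:

# A 1-day pass on day 1 ($2), a 7-day pass bought on day 4 covering days 4-8
# ($7), and a 1-day pass on day 20 ($2): total 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11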
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def _long_tensor(tok_lst: list) -> "torch.Tensor":
    '''simple docstring'''
    # NOTE: `torch_device` is assumed to be imported from transformers.testing_utils earlier in this file
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
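# The elementwise tolerance loop above can be expressed more compactly with
# torch.allclose. A self-contained sketch (illustrative only: the tensors below
# are dummy stand-ins for the real activations, not actual model outputs):
if __name__ == "__main__":
    import torch

    output_slice = torch.tensor([-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728])
    expected_slice = output_slice.clone()
    assert torch.allclose(output_slice, expected_slice, rtol=1e-4, atol=1e-4)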
| 648
| 0
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    '''simple docstring'''
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
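# Quick sanity checks for pigeon_sort (illustrative; runnable as-is):
assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]
assert pigeon_sort([-3, -7, -5]) == [-7, -5, -3]  # negatives work thanks to the i - _min offset
assert pigeon_sort([]) == []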
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = input("""Enter numbers separated by comma:\n""")
lowerCAmelCase__ = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
| 715
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
_UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : str = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def A__ ( self , __snake_case):
_UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(__snake_case , ' ').strip()
return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
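# Illustrative usage sketch. The class above mirrors transformers'
# XLMRobertaTokenizer; loading "xlm-roberta-base" is an assumption and needs the
# checkpoint to be available (network or local cache):
if __name__ == "__main__":
    from transformers import XLMRobertaTokenizer

    tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    enc = tok("Hello world")
    print(enc["input_ids"])  # starts with <s> (0) and ends with </s> (2), per the fairseq alignment above
    print(tok.convert_ids_to_tokens(enc["input_ids"]))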
| 648
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["pixel_values"]
def __init__( self , __snake_case = True , __snake_case = None , __snake_case = PILImageResampling.BICUBIC , __snake_case = True , __snake_case = None , __snake_case = True , __snake_case = 1 / 2_55 , __snake_case = True , __snake_case = None , __snake_case = None , __snake_case = True , **__snake_case , ):
super().__init__(**lowercase_)
_UpperCamelCase : List[str] = size if size is not None else {'shortest_edge': 2_24}
_UpperCamelCase : str = get_size_dict(lowercase_ , default_to_square=lowercase_)
_UpperCamelCase : Union[str, Any] = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCamelCase : Any = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name='crop_size')
_UpperCamelCase : Optional[Any] = do_resize
_UpperCamelCase : str = size
_UpperCamelCase : Union[str, Any] = resample
_UpperCamelCase : List[str] = do_center_crop
_UpperCamelCase : Optional[int] = crop_size
_UpperCamelCase : int = do_rescale
_UpperCamelCase : List[str] = rescale_factor
_UpperCamelCase : Optional[Any] = do_normalize
_UpperCamelCase : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCamelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCamelCase : Optional[Any] = do_convert_rgb
def A__ ( self , __snake_case , __snake_case , __snake_case = PILImageResampling.BICUBIC , __snake_case = None , **__snake_case , ):
_UpperCamelCase : int = get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
_UpperCamelCase : Union[str, Any] = get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def A__ ( self , __snake_case , __snake_case , __snake_case = None , **__snake_case , ):
_UpperCamelCase : Any = get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def A__ ( self , __snake_case , __snake_case , __snake_case = None , **__snake_case , ):
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case = None , **__snake_case , ):
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def A__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = ChannelDimension.FIRST , **__snake_case , ):
_UpperCamelCase : Tuple = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : List[Any] = size if size is not None else self.size
_UpperCamelCase : Optional[int] = get_size_dict(lowercase_ , param_name='size' , default_to_square=lowercase_)
_UpperCamelCase : Tuple = resample if resample is not None else self.resample
_UpperCamelCase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : str = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : Optional[int] = get_size_dict(lowercase_ , param_name='crop_size' , default_to_square=lowercase_)
_UpperCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : str = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : Dict = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : Optional[Any] = image_std if image_std is not None else self.image_std
_UpperCamelCase : Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCamelCase : Dict = make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCamelCase : List[Any] = [convert_to_rgb(lowercase_) for image in images]
# All transformations expect numpy arrays.
_UpperCamelCase : int = [to_numpy_array(lowercase_) for image in images]
if do_resize:
_UpperCamelCase : Optional[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
_UpperCamelCase : Tuple = [self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
_UpperCamelCase : Optional[int] = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
_UpperCamelCase : Any = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
_UpperCamelCase : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
_UpperCamelCase : Dict = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
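# Illustrative only: the resize -> center-crop -> rescale -> normalize pipeline
# above, driven through the public API. CLIPImageProcessor is the upstream class
# this mirrors; the random image below is a dummy assumption:
if __name__ == "__main__":
    import numpy as np
    from transformers import CLIPImageProcessor

    processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
    image = np.random.randint(0, 256, (320, 480, 3), dtype=np.uint8)
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)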
| 716
|
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
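# Illustrative call pattern for the combined processor above (a hedged sketch —
# names follow the upstream TvltProcessor, and `video_frames`/`waveform` are
# hypothetical inputs):
#     processor = TvltProcessor(image_processor, feature_extractor)
#     inputs = processor(images=video_frames, audio=waveform, sampling_rate=44_100)
#     # inputs merges the image-processor and feature-extractor outputs, as in
#     # the `output_dict.update(...)` calls above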
| 648
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class lowercase ( __lowerCAmelCase ):
"""simple docstring"""
a__ = '''blip_text_model'''
def __init__( self , __snake_case=3_05_24 , __snake_case=7_68 , __snake_case=7_68 , __snake_case=30_72 , __snake_case=7_68 , __snake_case=12 , __snake_case=8 , __snake_case=5_12 , __snake_case="gelu" , __snake_case=1e-12 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=3_05_22 , __snake_case=2 , __snake_case=0 , __snake_case=1_02 , __snake_case=True , __snake_case=True , **__snake_case , ):
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , sep_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = encoder_hidden_size
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : List[str] = projection_dim
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Optional[Any] = num_attention_heads
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : str = layer_norm_eps
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : Dict = is_decoder
_UpperCamelCase : Dict = use_cache
@classmethod
def A__ ( cls , __snake_case , **__snake_case):
cls._set_token_in_kwargs(lowerCAmelCase_)
        _UpperCamelCase , _UpperCamelCase = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_)
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
_UpperCamelCase : Optional[int] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_)
class lowercase ( __lowerCAmelCase ):
"""simple docstring"""
a__ = '''blip_vision_model'''
def __init__( self , __snake_case=7_68 , __snake_case=30_72 , __snake_case=5_12 , __snake_case=12 , __snake_case=12 , __snake_case=3_84 , __snake_case=16 , __snake_case="gelu" , __snake_case=1e-5 , __snake_case=0.0 , __snake_case=1e-10 , **__snake_case , ):
super().__init__(**lowerCAmelCase_)
_UpperCamelCase : str = hidden_size
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : List[Any] = projection_dim
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Optional[int] = num_attention_heads
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Dict = image_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[Any] = attention_dropout
_UpperCamelCase : Optional[int] = layer_norm_eps
_UpperCamelCase : Dict = hidden_act
@classmethod
def A__ ( cls , __snake_case , **__snake_case):
cls._set_token_in_kwargs(lowerCAmelCase_)
        _UpperCamelCase , _UpperCamelCase = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
_UpperCamelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_)
class lowercase ( __lowerCAmelCase ):
"""simple docstring"""
a__ = '''blip'''
a__ = True
def __init__( self , __snake_case=None , __snake_case=None , __snake_case=5_12 , __snake_case=2.6_5_9_2 , __snake_case=2_56 , **__snake_case , ):
super().__init__(**lowerCAmelCase_)
if text_config is None:
_UpperCamelCase : Dict = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
if vision_config is None:
_UpperCamelCase : Any = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
_UpperCamelCase : List[Any] = BlipTextConfig(**lowerCAmelCase_)
_UpperCamelCase : Any = BlipVisionConfig(**lowerCAmelCase_)
_UpperCamelCase : Dict = self.vision_config.hidden_size
_UpperCamelCase : Optional[Any] = projection_dim
_UpperCamelCase : Optional[int] = logit_scale_init_value
_UpperCamelCase : Any = 1.0
_UpperCamelCase : Tuple = 0.0_2
_UpperCamelCase : Optional[int] = image_text_hidden_size
@classmethod
def A__ ( cls , __snake_case , __snake_case , **__snake_case):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase_)
def A__ ( self):
_UpperCamelCase : str = copy.deepcopy(self.__dict__)
_UpperCamelCase : List[Any] = self.text_config.to_dict()
_UpperCamelCase : Any = self.vision_config.to_dict()
_UpperCamelCase : List[str] = self.__class__.model_type
return output
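# Illustrative only: composing the composite config from its two sub-configs via
# the classmethod above (class names per the upstream transformers BlipConfig):
if __name__ == "__main__":
    from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

    text_cfg = BlipTextConfig(hidden_size=768)
    vision_cfg = BlipVisionConfig(hidden_size=768)
    cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=512)
    print(cfg.to_dict()["model_type"])  # "blip"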
| 717
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
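# Illustrative only: the two derived defaults above in action —
# attention_hidden_size falls back to hidden_size, and intermediate_size to
# 4 * hidden_size, when left as None (upstream class name: RwkvConfig):
if __name__ == "__main__":
    from transformers import RwkvConfig

    cfg = RwkvConfig(hidden_size=1024)
    print(cfg.attention_hidden_size, cfg.intermediate_size)  # 1024 4096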
| 648
| 0
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    '''simple docstring'''
    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(F'''Benchmark when {number = }:''' )
        print(F'''{get_set_bits_count_using_modulo_operator(number) = }''' )
        timing = timeit(F'z.get_set_bits_count_using_modulo_operator({number})' , setup=setup )
        print(F'''timeit() runs in {timing} seconds''' )
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(number) = }''' )
        timing = timeit(
            F'z.get_set_bits_count_using_brian_kernighans_algorithm({number})' , setup=setup , )
        print(F'''timeit() runs in {timing} seconds''' )
    for number in (2_5, 3_7, 5_8, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
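# Cross-check (illustrative; runnable as-is): both implementations above agree
# with Python's own popcount, bin(n).count("1"):
for n in (0, 1, 2_5, 3_7, 5_8, 2**20 - 1):
    assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
    assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")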
| 718
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
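# Illustrative only: what the property above yields for the default task —
# dynamic batch/sequence axes for each model input (BertOnnxConfig is the
# upstream name of the class sketched here):
if __name__ == "__main__":
    from transformers.models.bert.configuration_bert import BertConfig, BertOnnxConfig

    onnx_cfg = BertOnnxConfig(BertConfig())
    print(onnx_cfg.inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...), ('token_type_ids', ...)])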
| 648
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
a__ = ["image_processor", "tokenizer"]
a__ = "BridgeTowerImageProcessor"
a__ = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , __snake_case , __snake_case):
super().__init__(snake_case__ , snake_case__)
def __call__( self , __snake_case , __snake_case = None , __snake_case = True , __snake_case = False , __snake_case = None , __snake_case = None , __snake_case = 0 , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = False , __snake_case = False , __snake_case = False , __snake_case = False , __snake_case = True , __snake_case = None , **__snake_case , ):
_UpperCamelCase : Optional[int] = self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel_values + pixel_mask
_UpperCamelCase : Optional[int] = self.image_processor(
snake_case__ , return_tensors=snake_case__ , do_normalize=snake_case__ , do_center_crop=snake_case__ , **snake_case__)
encoding.update(snake_case__)
return encoding
def A__ ( self , *__snake_case , **__snake_case):
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__)
def A__ ( self , *__snake_case , **__snake_case):
return self.tokenizer.decode(*snake_case__ , **snake_case__)
@property
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 719
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
_UpperCamelCase : List[Any] = self.model.config
_UpperCamelCase : Optional[int] = -1
        for idx, label in config.id2label.items():
if label.lower().startswith('entail'):
                _UpperCamelCase : Tuple = int(idx)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = outputs.logits
_UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
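# Illustrative only: the NLI-as-zero-shot trick used by setup()/encode()/decode()
# above, spelled out with the plain transformers API (downloads
# "facebook/bart-large-mnli"; logits column 2 is the entailment class for that
# checkpoint, matching the hardcoded index above):
if __name__ == "__main__":
    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    name = "facebook/bart-large-mnli"
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForSequenceClassification.from_pretrained(name)
    text, labels = "I loved this film!", ["positive", "negative"]
    inputs = tokenizer([text] * len(labels), [f"This example is {label}" for label in labels],
                       return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    print(labels[torch.argmax(logits[:, 2]).item()])  # most-entailed hypothesis wins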
| 648
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
lowerCAmelCase__ = get_logger()
lowerCAmelCase__ = None
class lowercase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self , __snake_case=None , __snake_case=None , **__snake_case):
super().__init__(features=SCREAMING_SNAKE_CASE_)
import jax
from jaxlib.xla_client import Device
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
raise ValueError(
f'''Expected {device} to be a `str` not {type(SCREAMING_SNAKE_CASE_)}, as `jaxlib.xla_extension.Device` '''
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.')
_UpperCamelCase : Tuple = device if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else str(jax.devices()[0])
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCamelCase : List[Any] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '''
f'''device: {str(jax.devices()[0])}.''')
_UpperCamelCase : str = str(jax.devices()[0])
_UpperCamelCase : Optional[int] = jnp_array_kwargs
@staticmethod
def A__ ( ):
import jax
return {str(SCREAMING_SNAKE_CASE_): device for device in jax.devices()}
def A__ ( self , __snake_case):
import jax
import jax.numpy as jnp
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_ , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
return jnp.stack(SCREAMING_SNAKE_CASE_ , axis=0)
return column
def A__ ( self , __snake_case):
import jax
import jax.numpy as jnp
if isinstance(SCREAMING_SNAKE_CASE_ , (str, bytes, type(SCREAMING_SNAKE_CASE_))):
return value
elif isinstance(SCREAMING_SNAKE_CASE_ , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
_UpperCamelCase : Optional[Any] = {}
if isinstance(SCREAMING_SNAKE_CASE_ , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                _UpperCamelCase : int = {'dtype': jnp.int64}
            else:
                _UpperCamelCase : Optional[int] = {'dtype': jnp.int32}
        elif isinstance(SCREAMING_SNAKE_CASE_ , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
            _UpperCamelCase : int = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image):
_UpperCamelCase : List[str] = np.asarray(SCREAMING_SNAKE_CASE_)
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCamelCase : Union[str, Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(SCREAMING_SNAKE_CASE_ , **{**default_dtype, **self.jnp_array_kwargs})
def A__ ( self , __snake_case):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(SCREAMING_SNAKE_CASE_ , '__array__') and not isinstance(SCREAMING_SNAKE_CASE_ , jax.Array):
_UpperCamelCase : Any = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_) for substruct in data_struct])
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple)):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_) for substruct in data_struct])
return self._tensorize(SCREAMING_SNAKE_CASE_)
def A__ ( self , __snake_case):
return map_nested(self._recursive_tensorize , SCREAMING_SNAKE_CASE_ , map_list=SCREAMING_SNAKE_CASE_)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_)
_UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_)
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_)
def A__ ( self , __snake_case):
_UpperCamelCase : Optional[Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_)
_UpperCamelCase : Tuple = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_ , pa_table.column_names[0])
_UpperCamelCase : str = self.recursive_tensorize(SCREAMING_SNAKE_CASE_)
_UpperCamelCase : int = self._consolidate(SCREAMING_SNAKE_CASE_)
return column
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_)
_UpperCamelCase : str = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_)
_UpperCamelCase : int = self.recursive_tensorize(SCREAMING_SNAKE_CASE_)
for column_name in batch:
_UpperCamelCase : str = self._consolidate(batch[column_name])
return batch
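# Illustrative only: this formatter is what datasets uses under
# Dataset.with_format("jax") — rows, columns and batches come back as
# jax.numpy arrays (requires jax to be installed):
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
    print(type(ds[0]["x"]))  # a jax.Array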
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
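# Illustrative only: with the _LazyModule pattern above, importing the package
# is cheap and the heavy submodules load on first attribute access, e.g.:
#     import transformers.models.blip as blip   # nothing heavy imported yet
#     blip.BlipProcessor                        # triggers the real import lazily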
| 648
| 0
|
def lowerCamelCase_ ( word: str ) -> str:
    '''simple docstring'''
    return "".join(chr(ord(char ) - 3_2 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 721
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 0
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
lowerCAmelCase__ = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
lowerCAmelCase__ = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
lowerCAmelCase__ = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels) -> float:
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1(preds, labels) -> dict:
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs) -> float:
    '''simple docstring'''
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , 'cosine' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :1_0]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
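# --- illustrative aside (not part of the original metric file) ---------------
# A minimal offline check of the helpers above, assuming the file's top-level
# numpy/scipy imports; no `datasets` download is needed.
if __name__ == "__main__":
    import numpy as np  # re-imported locally so the sketch is self-contained

    print(simple_accuracy(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0])))  # 0.75
    # identical en/in sentence vectors must give precision@10 == 1.0
    vecs = [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
    print(precision_at_10(vecs, vecs))  # 1.0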
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase__ = 5
lowerCAmelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowerCAmelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a__ = UNetaDModel
a__ = "sample"
@property
def A__ ( self):
_UpperCamelCase : Dict = 4
_UpperCamelCase : Tuple = 3
_UpperCamelCase : Dict = (32, 32)
_UpperCamelCase : str = floats_tensor((batch_size, num_channels) + sizes).to(_lowerCamelCase)
_UpperCamelCase : Any = torch.tensor([10]).to(_lowerCamelCase)
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self):
return (3, 32, 32)
@property
def A__ ( self):
return (3, 32, 32)
def A__ ( self):
_UpperCamelCase : List[Any] = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
_UpperCamelCase : int = self.dummy_input
return init_dict, inputs_dict
class lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a__ = UNetaDModel
a__ = "sample"
@property
def A__ ( self):
_UpperCamelCase : str = 4
_UpperCamelCase : int = 4
_UpperCamelCase : Union[str, Any] = (32, 32)
_UpperCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes).to(_lowerCamelCase)
_UpperCamelCase : Dict = torch.tensor([10]).to(_lowerCamelCase)
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self):
return (4, 32, 32)
@property
def A__ ( self):
return (4, 32, 32)
def A__ ( self):
_UpperCamelCase : List[str] = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
_UpperCamelCase : Any = self.dummy_input
return init_dict, inputs_dict
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
self.assertEqual(len(loading_info['missing_keys']) , 0)
model.to(_lowerCamelCase)
_UpperCamelCase : List[Any] = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU')
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Tuple = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_lowerCamelCase)
model.to(_lowerCamelCase)
_UpperCamelCase : List[Any] = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU')
def A__ ( self):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
_UpperCamelCase , _UpperCamelCase : List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_lowerCamelCase)
model_accelerate.to(_lowerCamelCase)
model_accelerate.eval()
_UpperCamelCase : Optional[int] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0) , )
_UpperCamelCase : int = noise.to(_lowerCamelCase)
_UpperCamelCase : Optional[int] = torch.tensor([10] * noise.shape[0]).to(_lowerCamelCase)
_UpperCamelCase : Any = model_accelerate(_lowerCamelCase , _lowerCamelCase)['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCamelCase , _UpperCamelCase : Tuple = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=_lowerCamelCase , low_cpu_mem_usage=_lowerCamelCase)
model_normal_load.to(_lowerCamelCase)
model_normal_load.eval()
_UpperCamelCase : int = model_normal_load(_lowerCamelCase , _lowerCamelCase)['sample']
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1e-3)
def A__ ( self):
_UpperCamelCase : Dict = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update')
model.eval()
model.to(_lowerCamelCase)
_UpperCamelCase : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0) , )
_UpperCamelCase : Dict = noise.to(_lowerCamelCase)
_UpperCamelCase : Union[str, Any] = torch.tensor([10] * noise.shape[0]).to(_lowerCamelCase)
with torch.no_grad():
_UpperCamelCase : List[Any] = model(_lowerCamelCase , _lowerCamelCase).sample
_UpperCamelCase : Union[str, Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCamelCase : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0])
# fmt: on
self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1e-3))
class lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a__ = UNetaDModel
a__ = "sample"
@property
def A__ ( self , __snake_case=(32, 32)):
_UpperCamelCase : Optional[Any] = 4
_UpperCamelCase : int = 3
_UpperCamelCase : Dict = floats_tensor((batch_size, num_channels) + sizes).to(_lowerCamelCase)
_UpperCamelCase : Union[str, Any] = torch.tensor(batch_size * [10]).to(dtype=torch.intaa , device=_lowerCamelCase)
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self):
return (3, 32, 32)
@property
def A__ ( self):
return (3, 32, 32)
def A__ ( self):
_UpperCamelCase : Optional[int] = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1e-6,
'mid_block_scale_factor': math.sqrt(2.0),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
_UpperCamelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Tuple = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
self.assertEqual(len(loading_info['missing_keys']) , 0)
model.to(_lowerCamelCase)
_UpperCamelCase : Dict = self.dummy_input
_UpperCamelCase : Dict = floats_tensor((4, 3) + (2_56, 2_56)).to(_lowerCamelCase)
_UpperCamelCase : str = noise
_UpperCamelCase : Union[str, Any] = model(**_lowerCamelCase)
assert image is not None, "Make sure output is not None"
@slow
def A__ ( self):
_UpperCamelCase : int = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256')
model.to(_lowerCamelCase)
_UpperCamelCase : List[Any] = 4
_UpperCamelCase : int = 3
_UpperCamelCase : Union[str, Any] = (2_56, 2_56)
_UpperCamelCase : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes).to(_lowerCamelCase)
_UpperCamelCase : Optional[Any] = torch.tensor(batch_size * [1e-4]).to(_lowerCamelCase)
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(_lowerCamelCase , _lowerCamelCase).sample
_UpperCamelCase : Dict = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCamelCase : int = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08])
# fmt: on
self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1e-2))
def A__ ( self):
_UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update')
model.to(_lowerCamelCase)
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : Tuple = 3
_UpperCamelCase : List[Any] = (32, 32)
_UpperCamelCase : Dict = torch.ones((batch_size, num_channels) + sizes).to(_lowerCamelCase)
_UpperCamelCase : str = torch.tensor(batch_size * [1e-4]).to(_lowerCamelCase)
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase).sample
_UpperCamelCase : str = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCamelCase : int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6])
# fmt: on
self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1e-2))
def A__ ( self):
# not required for this model
pass
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT configuration extended with movement-pruning options."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
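# --- illustrative aside (not in the original file) ----------------------------
# Minimal sketch of instantiating the config; the pruning-specific options are
# what distinguish it from a vanilla BERT config.
if __name__ == "__main__":
    config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    print(config.model_type, config.hidden_size, config.pruning_method)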
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 have the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
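# Hedged sanity check (illustrative, not in the original file): the 6k +/- 1
# loop above must agree with plain trial division on small inputs.
def _naive_is_prime(number: int) -> bool:
    if number < 2:
        return False
    return all(number % d != 0 for d in range(2, int(math.sqrt(number)) + 1))


assert all(is_prime(n) == _naive_is_prime(n) for n in range(200))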
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f'{solution() = }')
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
class Graph:
    """Undirected weighted graph stored as an adjacency map, with Boruvka's MST."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct (Boruvka assumes unique weights)."""
        edges = self.get_edges()
        # each undirected edge appears twice; drop the reversed duplicate
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            # equal ranks: bump one root and attach the other to it
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of `graph` via Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
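# --- usage sketch (not in the original file) ----------------------------------
# Build a small weighted graph (arbitrary example edges) and extract its MST
# with Boruvka's algorithm; `distinct_weight` enforces the unique-weight
# precondition first.
if __name__ == "__main__":
    g = Graph.build(edges=[(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 7), (2, 3, 3)])
    g.distinct_weight()
    print(Graph.boruvka_mst(g))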
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
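# Usage sketch (illustrative, not in the original script): `strabool` is meant
# as an argparse `type=` converter so "--class_cond false" parses to a bool.
assert strabool("yes") is True and strabool("0") is False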
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer - 1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer - 1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=strabool, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm (iterative)."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
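# --- illustrative check (not in the original file) ----------------------------
# Heap's algorithm must emit exactly n! distinct permutations; quick self-test
# on a small list:
import math as _math

_sample = [1, 2, 3, 4]
_perms = heaps(_sample)
assert len(_perms) == _math.factorial(len(_sample)) == len(set(_perms))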
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCAmelCase__ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self , *__snake_case , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case):
super().__init__(*A_ , **A_)
_UpperCamelCase : Dict = eval_examples
_UpperCamelCase : Tuple = post_process_function
_UpperCamelCase : List[Any] = quant_trainer_args
_UpperCamelCase : List[Any] = 1_28 # default number of calibration samples
def A__ ( self , __snake_case=None):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires an calib_dataset.')
_UpperCamelCase : Optional[int] = calib_dataset if calib_dataset is not None else self.calib_dataset
_UpperCamelCase : Union[str, Any] = self._remove_unused_columns(A_ , description='Calibration')
return DataLoader(
A_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A_ , )
def A__ ( self , __snake_case=None):
_UpperCamelCase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset
_UpperCamelCase : int = self.get_calib_dataloader(A_)
_UpperCamelCase : List[str] = self.model
quant_trainer.configure_model(A_ , self.quant_trainer_args , calib=A_)
model.eval()
quant_trainer.enable_calibration(A_)
logger.info('***** Running calibration *****')
logger.info(f''' Num examples = {self.calib_num}''')
logger.info(f''' Batch size = {calib_dataloader.batch_size}''')
for step, inputs in enumerate(A_):
# Prediction step
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.prediction_step(A_ , A_ , prediction_loss_only=A_)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(A_ , self.quant_trainer_args)
_UpperCamelCase : Any = model
def A__ ( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case = "eval"):
_UpperCamelCase : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
_UpperCamelCase : Union[str, Any] = self.get_eval_dataloader(A_)
_UpperCamelCase : Dict = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCamelCase : Tuple = self.compute_metrics
_UpperCamelCase : Union[str, Any] = None
_UpperCamelCase : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_UpperCamelCase : Tuple = eval_loop(
A_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , )
finally:
_UpperCamelCase : Optional[int] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_UpperCamelCase : int = self.post_process_function(A_ , A_ , output.predictions)
_UpperCamelCase : int = self.compute_metrics(A_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
_UpperCamelCase : Tuple = metrics.pop(A_)
self.log(A_)
else:
_UpperCamelCase : Dict = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
_UpperCamelCase : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A_)
return metrics
def A__ ( self , __snake_case , __snake_case , __snake_case=None , __snake_case = "test"):
_UpperCamelCase : Any = self.get_test_dataloader(A_)
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCamelCase : str = self.compute_metrics
_UpperCamelCase : Any = None
_UpperCamelCase : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_UpperCamelCase : Optional[Any] = eval_loop(
A_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , )
finally:
_UpperCamelCase : List[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_UpperCamelCase : List[Any] = self.post_process_function(A_ , A_ , output.predictions , 'predict')
_UpperCamelCase : Optional[Any] = self.compute_metrics(A_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
_UpperCamelCase : Union[str, Any] = metrics.pop(A_)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A_)
def A__ ( self , __snake_case="./"):
_UpperCamelCase : str = self.eval_dataset
_UpperCamelCase : List[str] = self.get_eval_dataloader(A_)
_UpperCamelCase : List[Any] = next(iter(A_))
# saving device - to make it consistent
_UpperCamelCase : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# convert to tuple
_UpperCamelCase : Union[str, Any] = tuple(v.to(A_) for k, v in batch.items())
logger.info('Converting model to be onnx compatible')
from pytorch_quantization.nn import TensorQuantizer
_UpperCamelCase : Dict = True
_UpperCamelCase : int = self.model.to(A_)
model.eval()
model.float()
_UpperCamelCase : List[str] = model.module if hasattr(A_ , 'module') else model
quant_trainer.configure_model(A_ , self.quant_trainer_args)
_UpperCamelCase : Dict = os.path.join(A_ , 'model.onnx')
logger.info(f'''exporting model to {output_model_file}''')
_UpperCamelCase : Optional[int] = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
A_ , A_ , A_ , export_params=A_ , opset_version=13 , do_constant_folding=A_ , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=A_ , )
logger.info('onnx export finished')
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
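# Quick illustrative check of the key mapping (hypothetical ParlAI-style key,
# not taken from a real checkpoint):
assert rename_state_dict_key("encoder.attention.q_lin.weight") == "encoder.self_attn.q_proj.weight"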
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
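# --- usage sketch (not in the original file) ----------------------------------
# Hypothetical invocation; "example.jpg" is a placeholder path, and constructing
# the tool triggers the checkpoint download.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageQuestionAnsweringTool()
    image = Image.open("example.jpg")  # placeholder
    print(tool(image, "What is in the image?"))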
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowercase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : List[Any] = tokenizer
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case)
_UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.tokenizer(__snake_case)
_UpperCamelCase : Dict = self.bert(**__snake_case)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
super().setUp()
_UpperCamelCase : Optional[Any] = [
BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_UpperCamelCase : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest')
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences)
_UpperCamelCase : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf.function(__snake_case)
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Optional[int] = tf.constant(__snake_case)
_UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case)
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
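# Not part of the upstream test module; a minimal entry point so this file can
# also be run directly. Test runners such as pytest discover the class without it.
if __name__ == "__main__":
    unittest.main()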
lowerCAmelCase__ = "Input must be a string of 8 numbers plus letter"
lowerCAmelCase__ = "TRWAGMYFPDXBNJZSQVHLCKE"
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCamelCase : Optional[int] = F'''Expected string as input, found {type(_SCREAMING_SNAKE_CASE ).__name__}'''
raise TypeError(_SCREAMING_SNAKE_CASE )
_UpperCamelCase : Tuple = spanish_id.replace('-' , '' ).upper()
if len(_SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(_SCREAMING_SNAKE_CASE )
try:
_UpperCamelCase : Optional[int] = int(spanish_id_clean[0:8] )
_UpperCamelCase : List[Any] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(_SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 2_3]
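# Worked example of the checksum (the digits here are illustrative, not a real ID):
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so "12345678-Z" is accepted
# while "12345678-A" is not.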
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
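# The import structure below maps submodule names to their public exports;
# _LazyModule consumes it so the heavy torch-backed modules are only imported
# on first attribute access.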
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LeViT model."""

    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
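# A minimal usage sketch (the checkpoint id comes from the archive map above):
#
#     config = LevitConfig()  # default LeViT-128S-sized configuration
#     config = LevitConfig.from_pretrained("facebook/levit-128S")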
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32)
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
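# The fixture path above is relative, so this helper assumes the test suite is
# launched from the repository root (as the transformers test runner does).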
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the file at jsonl_312_path stores its columns as {"col_3", "col_1", "col_2"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
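# The two helpers above mirror the writer's two output modes: load_json parses a
# single JSON document, while load_json_lines parses one JSON object per line.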
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch-snowflake substitution `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment with four segments, adding an outward 60-degree spike."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
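# Quick sanity check of the rotation matrix (up to floating-point error):
# rotate(numpy.array([1, 0]), 90) is approximately [0, 1], and
# rotate(numpy.array([1, 0]), 60) is approximately [0.5, 0.8660254] (== VECTOR_2).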
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the polyline described by `vectors` with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
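# A minimal usage sketch (the checkpoint id is illustrative, not fixed by this file):
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")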
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as a successful run.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
def solution(n: int = 4000000) -> int:
    """
    Returns the sum of all even Fibonacci numbers that do not exceed n
    (Project Euler problem 2).
    """
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
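# Worked example: solution(10) == 10, since the Fibonacci terms up to 10 are
# 0, 1, 1, 2, 3, 5, 8 and the even ones sum to 2 + 8 = 10.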
if __name__ == "__main__":
print(f'{solution() = }')
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
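# Background for the functions below (Project Euler 551): the sequence is
# defined by starting at 1 and repeatedly adding the digit sum of the previous
# term. Writing a term as b * 10**k + c, long stretches where only the low k
# digits change can be replayed as one cached "jump", keyed in `memo` by
# (digitsum(b), c).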
def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the smallest
    term for which c > 10**k, when the terms are written as a(i) = b * 10**k + c.
    Returns the difference between ending term and starting term, and the number
    of terms calculated.
    """
    # ds_b - digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    """Same as next_term(a_i, k, i, n) but computes terms without memoizing results."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """Adds addend to the digit array given in digits, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term (default 10**15) of the sequence that starts at 1 and
    repeatedly adds the digit sum of the previous term.
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'{solution() = }')
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """Configuration class to store the configuration of a YOLOS model."""

    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MAE model."""

    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """
    Recursive backtracking count of all simple paths from the top-left corner to
    the bottom-right corner of a binary grid, where 1-cells are blocked and no
    cell may be visited twice on a path.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
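# Worked example: an open 2x2 grid has exactly two simple paths
# (right-then-down and down-then-right), so
# depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2.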
if __name__ == "__main__":
import doctest
doctest.testmod()
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """
    Minimum cost to travel on all given days of a year, choosing among a 1-day,
    a 7-day and a 30-day pass whose prices are given in `costs`.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
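# Worked example (the classic "minimum cost for tickets" instance):
# mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
# (a 1-day pass on day 1, a 7-day pass covering days 4-8, a 1-day pass on day 20).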
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        # byte 178 is not valid UTF-8 on its own, so decoding falls back to the replacement character
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
def A__ ( self):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
_UpperCamelCase : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A)
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}'''):
_UpperCamelCase : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_string(_A)
self.assertIsInstance(_A , _A)
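# Hedged usage sketch (not part of the suite above; "deepmind/language-perceiver" is an
# assumed checkpoint): the save/reload round-trip tested earlier reduces to this pattern.
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
# tok.add_special_tokens({"additional_special_tokens": ["new_additional_special_token"]})
# tok.save_pretrained("/tmp/tok")
# reloaded = tok.__class__.from_pretrained("/tmp/tok")
# assert "new_additional_special_token" in reloaded.additional_special_tokens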
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) = config_and_inputs
_UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def lowerCamelCase_ ( UpperCAmelCase_ ):
'''Builds a long tensor on the default test device.'''
return torch.tensor(
UpperCAmelCase_ , dtype=torch.long , device=torch_device , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = False , __snake_case = False , __snake_case = None , __snake_case = None , **__snake_case , ):
super().__init__(
features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , )
_UpperCamelCase : int = Generator(
cache_dir=__UpperCamelCase , features=__UpperCamelCase , generator=__UpperCamelCase , gen_kwargs=__UpperCamelCase , **__UpperCamelCase , )
def A__ ( self):
# Build iterable dataset
if self.streaming:
_UpperCamelCase : Union[str, Any] = self.builder.as_streaming_dataset(split='train')
# Build regular (map-style) dataset
else:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
_UpperCamelCase : Optional[int] = None
self.builder.download_and_prepare(
download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , )
_UpperCamelCase : Optional[Any] = self.builder.as_dataset(
split='train' , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory)
return dataset
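# Hedged usage sketch (the reader class is the one defined above, exposed upstream
# as datasets.io.generator.GeneratorDatasetInputStream; the generator is assumed):
# def gen():
#     yield {"text": "hello"}
#     yield {"text": "world"}
# dataset = GeneratorDatasetInputStream(generator=gen).read()
# assert dataset[0]["text"] == "hello"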
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
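# Worked example of the alignment above: the spm piece ',' has spm id 3, so its
# fairseq-aligned id is 3 + self.fairseq_offset == 4, matching the vocab table.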
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
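# Resulting formats (XLM-R convention): single sequence -> `<s> A </s>`;
# pair of sequences -> `<s> A </s></s> B </s>`.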
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
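# XLM-RoBERTa does not use token type ids, so the mask above is all zeros; the
# list length only accounts for the special tokens added around the sequences.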
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
_UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : str = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def A__ ( self , __snake_case):
_UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(__snake_case , ' ').strip()
return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
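# Hedged usage sketch (assumes a local sentencepiece.bpe.model file; upstream this
# class is XLMRobertaTokenizer):
# tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
# ids = tok("Hello world!")["input_ids"]   # starts with <s> (0), ends with </s> (2)
# tok.save_vocabulary("/tmp/xlmr")          # copies the spm model into the directory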
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase__ = logging.get_logger(__name__)
# General docstring
lowerCAmelCase__ = """RegNetConfig"""
# Base docstring
lowerCAmelCase__ = """facebook/regnet-y-040"""
lowerCAmelCase__ = [1, 1_0_8_8, 7, 7]
# Image classification docstring
lowerCAmelCase__ = """facebook/regnet-y-040"""
lowerCAmelCase__ = """tabby, tabby cat"""
lowerCAmelCase__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case = 3 , __snake_case = 1 , __snake_case = 1 , __snake_case = "relu" , ):
super().__init__()
_UpperCamelCase : List[str] = nn.Convad(
__snake_case , __snake_case , kernel_size=__snake_case , stride=__snake_case , padding=kernel_size // 2 , groups=__snake_case , bias=__snake_case , )
_UpperCamelCase : Any = nn.BatchNormad(__snake_case)
_UpperCamelCase : Tuple = ACTaFN[activation] if activation is not None else nn.Identity()
def A__ ( self , __snake_case):
_UpperCamelCase : List[Any] = self.convolution(__snake_case)
_UpperCamelCase : Union[str, Any] = self.normalization(__snake_case)
_UpperCamelCase : str = self.activation(__snake_case)
return hidden_state
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act)
_UpperCamelCase : str = config.num_channels
def A__ ( self , __snake_case):
_UpperCamelCase : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
_UpperCamelCase : List[str] = self.embedder(__snake_case)
return hidden_state
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case = 2):
super().__init__()
_UpperCamelCase : int = nn.Convad(__snake_case , __snake_case , kernel_size=1 , stride=__snake_case , bias=__snake_case)
_UpperCamelCase : Optional[Any] = nn.BatchNormad(__snake_case)
def A__ ( self , __snake_case):
_UpperCamelCase : str = self.convolution(__snake_case)
_UpperCamelCase : int = self.normalization(__snake_case)
return hidden_state
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case):
super().__init__()
_UpperCamelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1))
_UpperCamelCase : List[str] = nn.Sequential(
nn.Convad(__snake_case , __snake_case , kernel_size=1) , nn.ReLU() , nn.Convad(__snake_case , __snake_case , kernel_size=1) , nn.Sigmoid() , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = self.pooler(__snake_case)
_UpperCamelCase : Any = self.attention(__snake_case)
_UpperCamelCase : List[str] = hidden_state * attention
return hidden_state
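# Design note: this is a standard squeeze-and-excitation block -- global average
# pooling to 1x1, a two-layer 1x1-conv bottleneck with ReLU/Sigmoid gating, and a
# channel-wise rescaling of the input feature map.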
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = 1):
super().__init__()
_UpperCamelCase : Dict = in_channels != out_channels or stride != 1
_UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width)
_UpperCamelCase : Optional[int] = (
RegNetShortCut(__snake_case , __snake_case , stride=__snake_case) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase : Union[str, Any] = nn.Sequential(
RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(__snake_case , __snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act) , RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=__snake_case) , )
_UpperCamelCase : List[Any] = ACTaFN[config.hidden_act]
def A__ ( self , __snake_case):
_UpperCamelCase : List[Any] = hidden_state
_UpperCamelCase : List[Any] = self.layer(__snake_case)
_UpperCamelCase : Optional[int] = self.shortcut(__snake_case)
hidden_state += residual
_UpperCamelCase : Any = self.activation(__snake_case)
return hidden_state
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = 1):
super().__init__()
_UpperCamelCase : int = in_channels != out_channels or stride != 1
_UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width)
_UpperCamelCase : List[str] = (
RegNetShortCut(__snake_case , __snake_case , stride=__snake_case) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase : Optional[Any] = nn.Sequential(
RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(__snake_case , __snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act) , RegNetSELayer(__snake_case , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=__snake_case) , )
_UpperCamelCase : Dict = ACTaFN[config.hidden_act]
def A__ ( self , __snake_case):
_UpperCamelCase : Tuple = hidden_state
_UpperCamelCase : int = self.layer(__snake_case)
_UpperCamelCase : int = self.shortcut(__snake_case)
hidden_state += residual
_UpperCamelCase : Optional[int] = self.activation(__snake_case)
return hidden_state
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = 2 , __snake_case = 2 , ):
super().__init__()
_UpperCamelCase : Dict = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
_UpperCamelCase : Any = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__snake_case , __snake_case , __snake_case , stride=__snake_case , ) , *[layer(__snake_case , __snake_case , __snake_case) for _ in range(depth - 1)] , )
def A__ ( self , __snake_case):
_UpperCamelCase : List[Any] = self.layers(__snake_case)
return hidden_state
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : Optional[int] = nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
_UpperCamelCase : str = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(__snake_case , config.depths[1:]):
self.stages.append(RegNetStage(__snake_case , __snake_case , __snake_case , depth=__snake_case))
def A__ ( self , __snake_case , __snake_case = False , __snake_case = True):
_UpperCamelCase : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase : str = hidden_states + (hidden_state,)
_UpperCamelCase : List[str] = stage_module(__snake_case)
if output_hidden_states:
_UpperCamelCase : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = RegNetConfig
a__ = """regnet"""
a__ = """pixel_values"""
a__ = True
def A__ ( self , __snake_case):
if isinstance(__snake_case , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu')
elif isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def A__ ( self , __snake_case , __snake_case=False):
if isinstance(__snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = value
lowerCAmelCase__ = R"""\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"""
lowerCAmelCase__ = R"""\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , _lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__(__snake_case)
_UpperCamelCase : Optional[int] = config
_UpperCamelCase : Dict = RegNetEmbeddings(__snake_case)
_UpperCamelCase : Optional[int] = RegNetEncoder(__snake_case)
_UpperCamelCase : Dict = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__snake_case)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A__ ( self , __snake_case , __snake_case = None , __snake_case = None):
_UpperCamelCase : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase : Dict = self.embedder(__snake_case)
_UpperCamelCase : Any = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case)
_UpperCamelCase : str = encoder_outputs[0]
_UpperCamelCase : Union[str, Any] = self.pooler(__snake_case)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _lowercase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__(__snake_case)
_UpperCamelCase : Tuple = config.num_labels
_UpperCamelCase : int = RegNetModel(__snake_case)
# classification head
_UpperCamelCase : str = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__snake_case)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A__ ( self , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , ):
_UpperCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase : Optional[Any] = self.regnet(__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case)
_UpperCamelCase : int = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase : Optional[int] = self.classifier(__snake_case)
_UpperCamelCase : Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCamelCase : int = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCamelCase : Union[str, Any] = 'single_label_classification'
else:
_UpperCamelCase : Dict = 'multi_label_classification'
if self.config.problem_type == "regression":
_UpperCamelCase : List[Any] = MSELoss()
if self.num_labels == 1:
_UpperCamelCase : List[Any] = loss_fct(logits.squeeze() , labels.squeeze())
else:
_UpperCamelCase : Union[str, Any] = loss_fct(__snake_case , __snake_case)
elif self.config.problem_type == "single_label_classification":
_UpperCamelCase : List[Any] = CrossEntropyLoss()
_UpperCamelCase : List[Any] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
_UpperCamelCase : List[str] = BCEWithLogitsLoss()
_UpperCamelCase : Any = loss_fct(__snake_case , __snake_case)
if not return_dict:
_UpperCamelCase : int = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states)
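# Hedged inference sketch (checkpoint from the docstring constants above; `image`
# is an assumed PIL image):
# from transformers import AutoImageProcessor
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# inputs = processor(images=image, return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])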
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
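# Hedged usage sketch (upstream this class is TvltProcessor; the input names and
# shapes here are assumptions):
# processor = TvltProcessor(image_processor, feature_extractor)
# batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
# # -> dict merging image and audio features, e.g. "pixel_values" and "audio_values"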
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : int = jnp.ones((batch_size, length)) / length
return scores
def A__ ( self):
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Tuple = 20
_UpperCamelCase : Union[str, Any] = self._get_uniform_logits(batch_size=2 , length=__snake_case)
# tweak scores to not be uniform anymore
_UpperCamelCase : str = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
_UpperCamelCase : Any = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
_UpperCamelCase : str = jax.nn.softmax(__snake_case , axis=-1)
_UpperCamelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5)
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3)
_UpperCamelCase : Optional[int] = jax.nn.softmax(temp_dist_warper_sharper(__snake_case , scores.copy() , cur_len=__snake_case) , axis=-1)
_UpperCamelCase : Any = jax.nn.softmax(temp_dist_warper_smoother(__snake_case , scores.copy() , cur_len=__snake_case) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def A__ ( self):
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : List[str] = 10
_UpperCamelCase : Optional[int] = 2
# create ramp distribution
_UpperCamelCase : int = np.broadcast_to(np.arange(__snake_case)[None, :] , (batch_size, vocab_size)).copy()
_UpperCamelCase : Tuple = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : List[Any] = FlaxTopKLogitsWarper(3)
_UpperCamelCase : List[Any] = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
_UpperCamelCase : str = np.broadcast_to(np.arange(__snake_case)[None, :] , (batch_size, length)).copy()
_UpperCamelCase : str = top_k_warp_safety_check(__snake_case , __snake_case , cur_len=__snake_case)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def A__ ( self):
_UpperCamelCase : Union[str, Any] = None
_UpperCamelCase : Any = 10
_UpperCamelCase : str = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Union[str, Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]]))
_UpperCamelCase : Optional[Any] = FlaxTopPLogitsWarper(0.8)
_UpperCamelCase : List[str] = np.exp(top_p_warp(__snake_case , __snake_case , cur_len=__snake_case))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : str = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]])
self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1e-3))
# check edge cases with negative and extreme logits
_UpperCamelCase : List[Any] = np.broadcast_to(np.arange(__snake_case)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : List[Any] = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
_UpperCamelCase : List[str] = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def A__ ( self):
_UpperCamelCase : Optional[Any] = 20
_UpperCamelCase : Any = 4
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case)
# check that min length is applied at length 5
_UpperCamelCase : Dict = ids_tensor((batch_size, 20) , vocab_size=20)
_UpperCamelCase : str = 5
_UpperCamelCase : int = self._get_uniform_logits(__snake_case , __snake_case)
_UpperCamelCase : Dict = min_dist_processor(__snake_case , __snake_case , cur_len=__snake_case)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf')])
# check that min length is not applied anymore at length 15
_UpperCamelCase : Union[str, Any] = self._get_uniform_logits(__snake_case , __snake_case)
_UpperCamelCase : int = 15
_UpperCamelCase : Optional[int] = min_dist_processor(__snake_case , __snake_case , cur_len=__snake_case)
self.assertFalse(jnp.isinf(__snake_case).any())
def A__ ( self):
_UpperCamelCase : int = 20
_UpperCamelCase : List[Any] = 4
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case)
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Dict = ids_tensor((batch_size, 1) , vocab_size=20)
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : Dict = self._get_uniform_logits(__snake_case , __snake_case)
_UpperCamelCase : Optional[int] = logits_processor(__snake_case , __snake_case , cur_len=__snake_case)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Any = self._get_uniform_logits(__snake_case , __snake_case)
_UpperCamelCase : Optional[int] = logits_processor(__snake_case , __snake_case , cur_len=__snake_case)
self.assertFalse(jnp.isinf(__snake_case).any())
def A__ ( self):
_UpperCamelCase : Any = 20
_UpperCamelCase : Any = 4
_UpperCamelCase : str = 0
_UpperCamelCase : List[Any] = 5
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case)
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Tuple = ids_tensor((batch_size, 4) , vocab_size=20)
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : Optional[int] = self._get_uniform_logits(__snake_case , __snake_case)
_UpperCamelCase : Union[str, Any] = logits_processor(__snake_case , __snake_case , cur_len=__snake_case)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Any = 3
_UpperCamelCase : str = self._get_uniform_logits(__snake_case , __snake_case)
_UpperCamelCase : Dict = logits_processor(__snake_case , __snake_case , cur_len=__snake_case)
self.assertFalse(jnp.isinf(__snake_case).any())
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : int = 10
_UpperCamelCase : int = 15
_UpperCamelCase : Tuple = 2
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : Dict = 15
# dummy input_ids and scores
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, sequence_length) , __snake_case)
_UpperCamelCase : Dict = input_ids.copy()
_UpperCamelCase : Union[str, Any] = self._get_uniform_logits(__snake_case , __snake_case)
_UpperCamelCase : int = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Any = FlaxTemperatureLogitsWarper(temperature=0.5)
_UpperCamelCase : Optional[Any] = FlaxTopKLogitsWarper(3)
_UpperCamelCase : Any = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
_UpperCamelCase : str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case)
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case)
_UpperCamelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case)
_UpperCamelCase : List[Any] = 10
# no processor list
_UpperCamelCase : str = temp_dist_warp(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : Tuple = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : Tuple = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : Dict = min_dist_proc(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : Dict = bos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : int = eos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case)
# with processor list
_UpperCamelCase : List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
_UpperCamelCase : Optional[Any] = processor(__snake_case , __snake_case , cur_len=__snake_case)
# scores should be equal
self.assertTrue(jnp.allclose(__snake_case , __snake_case , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def A__ ( self):
_UpperCamelCase : int = 4
_UpperCamelCase : Dict = 10
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Dict = 1
_UpperCamelCase : Tuple = 15
# dummy input_ids and scores
_UpperCamelCase : Any = ids_tensor((batch_size, sequence_length) , __snake_case)
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : Dict = self._get_uniform_logits(__snake_case , __snake_case)
_UpperCamelCase : Tuple = scores.copy()
# instantiate all dist processors
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5)
_UpperCamelCase : Any = FlaxTopKLogitsWarper(3)
_UpperCamelCase : Optional[Any] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
_UpperCamelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case)
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case)
_UpperCamelCase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case)
_UpperCamelCase : List[Any] = 10
# no processor list
def run_no_processor_list(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = temp_dist_warp(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : Union[str, Any] = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : Union[str, Any] = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : Optional[Any] = min_dist_proc(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : int = bos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case)
_UpperCamelCase : List[str] = eos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case)
return scores
# with processor list
def run_processor_list(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
_UpperCamelCase : int = processor(__snake_case , __snake_case , cur_len=__snake_case)
return scores
_UpperCamelCase : Dict = jax.jit(__snake_case)
_UpperCamelCase : Union[str, Any] = jax.jit(__snake_case)
_UpperCamelCase : str = jitted_run_no_processor_list(__snake_case , __snake_case , __snake_case)
_UpperCamelCase : Any = jitted_run_processor_list(__snake_case , __snake_case , __snake_case)
# scores should be equal
self.assertTrue(jnp.allclose(__snake_case , __snake_case , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
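# Hedged example of the fallbacks resolved above (upstream class: RwkvConfig):
# config = RwkvConfig(hidden_size=4096)
# config.attention_hidden_size  # 4096  (defaults to hidden_size)
# config.intermediate_size      # 16384 (defaults to 4 * hidden_size)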
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowerCAmelCase__ = {
"n_samples": 6_4,
"horizon": 3_2,
"num_inference_steps": 2_0,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
lowerCAmelCase__ = "hopper-medium-v2"
lowerCAmelCase__ = gym.make(env_name)
lowerCAmelCase__ = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
lowerCAmelCase__ = env.reset()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1_0_0_0
lowerCAmelCase__ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowerCAmelCase__ = pipeline(obs, planning_horizon=3_2)
# execute action in environment
lowerCAmelCase__ = env.step(denorm_actions)
lowerCAmelCase__ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
lowerCAmelCase__ = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
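# --- Usage sketch (illustrative; assumes the two classes above correspond to
# `BertConfig` and `BertOnnxConfig` from `transformers`) ---
# from transformers import BertConfig
# from transformers.models.bert import BertOnnxConfig
#
# config = BertConfig(hidden_size=7_68, num_hidden_layers=12)
# onnx_config = BertOnnxConfig(config, task='sequence-classification')
# print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes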
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase ( UpperCamelCase_ ):
"""simple docstring"""
a__ = ["audio_values", "audio_mask"]
def __init__( self , __snake_case=20_48 , __snake_case=1 , __snake_case=[16, 16] , __snake_case=1_28 , __snake_case=4_41_00 , __snake_case=86 , __snake_case=20_48 , __snake_case=0.0 , **__snake_case , ):
super().__init__(
feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , **__snake_case , )
_UpperCamelCase : Optional[int] = spectrogram_length
_UpperCamelCase : Optional[int] = num_channels
_UpperCamelCase : Any = patch_size
_UpperCamelCase : Any = feature_size // self.patch_size[1]
_UpperCamelCase : List[str] = n_fft
_UpperCamelCase : Optional[Any] = sampling_rate // hop_length_to_sampling_rate
_UpperCamelCase : List[str] = sampling_rate
_UpperCamelCase : Any = padding_value
_UpperCamelCase : Dict = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__snake_case , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__snake_case , norm='slaney' , mel_scale='slaney' , ).T
def A__ ( self , __snake_case):
_UpperCamelCase : str = spectrogram(
__snake_case , window_function(self.n_fft , 'hann') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
_UpperCamelCase : Optional[Any] = log_spec[:, :-1]
_UpperCamelCase : Any = log_spec - 2_0.0
_UpperCamelCase : Any = np.clip(log_spec / 4_0.0 , -2.0 , 0.0) + 1.0
return log_spec
def __call__( self , __snake_case , __snake_case = None , __snake_case = True , __snake_case = None , __snake_case = False , __snake_case = False , **__snake_case , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''')
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
_UpperCamelCase : int = isinstance(__snake_case , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
_UpperCamelCase : Optional[int] = is_batched_numpy or (
isinstance(__snake_case , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
_UpperCamelCase : List[str] = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__snake_case , np.ndarray):
_UpperCamelCase : List[Any] = np.asarray(__snake_case , dtype=np.floataa)
elif isinstance(__snake_case , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
_UpperCamelCase : str = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
_UpperCamelCase : List[Any] = [np.asarray([raw_speech]).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_UpperCamelCase : Optional[int] = [
self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __snake_case):
            _UpperCamelCase : Optional[Any] = [np.asarray(feature , dtype=np.floataa) for feature in audio_features]
# Create audio attention mask
_UpperCamelCase : Optional[int] = max(
[ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) # The maximum number of audio patches in a batch
if return_attention_mask:
_UpperCamelCase : Union[str, Any] = [
(ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
for feature in audio_features
]
_UpperCamelCase : List[Any] = np.array(__snake_case).astype(np.floataa)
# convert into correct format for padding
_UpperCamelCase : Optional[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_UpperCamelCase : int = np.ones([len(__snake_case), 1, max_time_len, self.feature_size]).astype(np.floataa)
_UpperCamelCase : List[str] = padded_audio_features * self.padding_value
for i in range(len(__snake_case)):
_UpperCamelCase : str = audio_features[i]
_UpperCamelCase : str = feature
# return as BatchFeature
if return_attention_mask:
_UpperCamelCase : List[Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
_UpperCamelCase : Union[str, Any] = {'''audio_values''': padded_audio_features}
_UpperCamelCase : Dict = BatchFeature(data=__snake_case , tensor_type=__snake_case)
return encoded_inputs
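# --- Usage sketch (illustrative; assumes the extractor above corresponds to
# `TvltFeatureExtractor` and default construction works) ---
# import numpy as np
#
# extractor = TvltFeatureExtractor()                       # default 44.1 kHz config
# waveform = np.random.randn(44_100).astype(np.float32)    # ~1 s of mono audio
# inputs = extractor(waveform, sampling_rate=44_100, return_tensors='np')
# print(inputs['audio_values'].shape, inputs['audio_mask'].shape)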
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
    def A__ ( self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def A__ ( self , text , labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )

    def A__ ( self , outputs):
        logits = outputs.logits
        # pick the label whose entailment logit (the column found in setup) is highest
        label_id = torch.argmax(logits[:, self.entailment_id]).item()
        return self._labels[label_id]
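# --- Usage sketch (illustrative; method and class names are assumptions, as
# the tool follows the usual setup/encode/decode PipelineTool contract) ---
# tool = TextClassificationTool()          # assumed public name of the class
# tool.setup()                             # downloads facebook/bart-large-mnli
# inputs = tool.encode('I really enjoyed this movie', ['positive', 'negative'])
# outputs = tool.model(**inputs)
# print(tool.decode(outputs))              # -> 'positive'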
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(F'''In-order Traversal: {inorder(root )}''' )
    print(F'''Pre-order Traversal: {preorder(root )}''' )
    print(F'''Post-order Traversal: {postorder(root )}''' , '\n' )
    print(F'''Height of Tree: {height(root )}''' , '\n' )
    print('Complete Level Order Traversal: ' )
    print(level_order(root ) , '\n' )
    print('Level-wise order Traversal: ' )
    for level in range(1 , height(root ) + 1 ):
        print(F'''Level {level}:''' , get_nodes_from_left_to_right(root , level=level ) )
    print('\nZigZag order Traversal: ' )
    print(zigzag(root ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
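# Expected output for the sample tree built by make_tree() (1 with children
# 2 and 3; 2 with children 4 and 5):
#   In-order:    [4, 2, 5, 1, 3]
#   Pre-order:   [1, 2, 4, 5, 3]
#   Post-order:  [4, 5, 2, 3, 1]
#   Height:      3
#   Level order: [1, 2, 3, 4, 5]
#   ZigZag:      [[1], [3, 2], [4, 5]]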
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
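# --- Illustrative note (not part of the original file) ---
# The `_LazyModule` indirection above defers the heavy torch/TF imports until
# an attribute is first accessed, so e.g. (assuming the usual public names)
# `from transformers import BlipProcessor, BlipForConditionalGeneration`
# only triggers the torch-backed submodule import at that point.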
import re
def split_input(str_: str) -> list:
    '''simple docstring'''
    return [char.split() for char in re.split(R'[^ a-z A-Z 0-9 \s]' , str_ )]


def to_simple_case(str_: str) -> str:
    '''simple docstring'''
    string_split = split_input(str_ )
    return "".join(
        [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    '''simple docstring'''
    try:
        string_split = split_input(text )
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    '''simple docstring'''
    return to_simple_case(text )


def to_camel_case(text: str) -> str:
    '''simple docstring'''
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , '_' )


def to_kebab_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , '-' )
if __name__ == "__main__":
__import__("""doctest""").testmod()
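# Worked examples for the helpers above:
#   to_pascal_case('one two three')        -> 'OneTwoThree'
#   to_camel_case('one two three')         -> 'oneTwoThree'
#   to_snake_case('one two three', False)  -> 'one_two_three'
#   to_kebab_case('one two three', True)   -> 'ONE-TWO-THREE'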
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
test_graph = [
    [0, 1_6, 1_3, 0, 0, 0],
    [0, 0, 1_0, 1_2, 0, 0],
    [0, 4, 0, 0, 1_4, 0],
    [0, 0, 9, 0, 0, 2_0],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float('Inf' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
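# The graph above is the classic CLRS max-flow example; the expected min-cut
# edge set is [(1, 3), (4, 3), (4, 5)] (maximum flow value 23).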
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
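# --- Illustrative note ---
# The multilingual checkpoint prepends the target-language code token to every
# encoded sequence, which is what the prefix-token assertions above verify:
# setting `tgt_lang` changes `prefix_tokens` from [FR_CODE] to [ES_CODE].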
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """simple docstring"""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32))

    def get_image_coords(self):
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode='trunc'),
            ] , axis=1 , )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0) , [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3)
        return rays

    def get_camera_rays(self , coords):
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size , -1 , 2)
        directions = (
            self.z.view(batch_size , 1 , 3)
            + self.x.view(batch_size , 1 , 3) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3) , [batch_size, directions.shape[1], 3]),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3)

    def resize_image(self , width , height):
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    '''simple docstring'''
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
        z = np.array([np.sin(theta), np.cos(theta), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs)) , )
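# --- Usage sketch (illustrative) ---
# cameras = create_pan_cameras(64)   # 20 views panning around the origin
# rays = cameras.camera_rays         # per-pixel (origin, direction) pairs
# print(rays.shape)                  # -> torch.Size([1, 20 * 64 * 64, 2, 3])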
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
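# --- Usage sketch (illustrative; the class above mirrors `MaskedBertConfig`
# from the movement-pruning research project, which is an assumption) ---
# config = MaskedBertConfig(pruning_method='topK', mask_init='constant', mask_scale=0.0)
# The pruning_method/mask_init/mask_scale fields are the only additions on top
# of a standard BERT configuration.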
from __future__ import annotations
import os
from typing import Any
import requests
lowerCAmelCase__ = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCAmelCase__ = BASE_URL + """/user"""
# https://github.com/settings/tokens
lowerCAmelCase__ = os.environ.get("""USER_TOKEN""", """""")
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> dict[Any, Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = {
'Authorization': F'''token {auth_token}''',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def get_model_optimizer( self , resolution=32):
        set_seed(0)
        model = UNetaDModel(sample_size=resolution , in_channels=3 , out_channels=3)
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
        return model, optimizer

    @slow
    def A__ ( self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0 , 10_00 , (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model , optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model , optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5))
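# Note on the assertions above: both runs are seeded identically, and
# `add_noise` is the same linear-schedule operation for DDPMScheduler and
# DDIMScheduler, so the noisy inputs and model predictions are expected to
# match to within floating-point tolerance.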
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = inspect.getfile(accelerate.test_utils )
a__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
a__ = ["accelerate", "launch"]
a__ = Path.home() / ".cache/huggingface/accelerate"
a__ = "default_config.yaml"
a__ = config_folder / config_file
a__ = config_folder / "_default_config.yaml"
a__ = Path("tests/test_configs" )
@classmethod
def A__ ( cls):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path)
@classmethod
def A__ ( cls):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path)
def A__ ( self):
_UpperCamelCase : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())
def A__ ( self):
for config in sorted(self.test_config_path.glob('**/*.yaml')):
with self.subTest(config_file=__snake_case):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(__snake_case), self.test_file_path] , env=os.environ.copy())
def A__ ( self):
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "test-tpu"
a__ = "us-central1-a"
a__ = "ls"
a__ = ["accelerate", "tpu-config"]
a__ = "cd /usr/share"
a__ = "tests/test_samples/test_command_file.sh"
a__ = "Running gcloud compute tpus tpu-vm ssh"
def A__ ( self):
_UpperCamelCase : List[str] = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , )
def A__ ( self):
_UpperCamelCase : Any = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , )
def A__ ( self):
_UpperCamelCase : Tuple = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=__snake_case)
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , )
def A__ ( self):
_UpperCamelCase : List[str] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , )
def A__ ( self):
_UpperCamelCase : Tuple = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __snake_case , )
def A__ ( self):
_UpperCamelCase : Optional[int] = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , )
def A__ ( self):
_UpperCamelCase : Optional[Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , )
def A__ ( self):
_UpperCamelCase : Optional[Any] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , )
def A__ ( self):
_UpperCamelCase : Any = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=__snake_case , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , )
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def strabool(v) -> bool:
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    '''simple docstring'''
    new_checkpoint[f'''{new_prefix}.norm1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[f'''{new_prefix}.conv1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[f'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[f'''{new_prefix}.norm2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[f'''{new_prefix}.norm2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[f'''{new_prefix}.conv2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[f'''{new_prefix}.conv2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[f'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[f'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[f'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    '''simple docstring'''
    weight_q , weight_k , weight_v = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
    new_checkpoint[f'''{new_prefix}.group_norm.weight'''] = checkpoint[f'''{old_prefix}.norm.weight''']
    new_checkpoint[f'''{new_prefix}.group_norm.bias'''] = checkpoint[f'''{old_prefix}.norm.bias''']
    new_checkpoint[f'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[f'''{new_prefix}.to_out.0.bias'''] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = f'''down_blocks.{i}.attentions.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f'''down_blocks.{i}.downsamplers.0'''
            old_prefix = f'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config['up_block_types']
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = f'''up_blocks.{i}.attentions.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
    parser.add_argument(
        """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
    )
    parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f'Checkpoint: {ckpt_name}')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
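# Example invocation (hypothetical paths; the script filename is an assumption):
#   python convert_consistency_to_diffusers.py \
#       --unet_path /path/to/cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-out \
#       --class_cond True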
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    '''simple docstring'''
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(faces_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies


def solution() -> float:
    '''simple docstring'''
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
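# Expected output (Project Euler problem 205, Peter's 9 four-sided dice vs
# Colin's 6 six-sided dice): solution() == 0.5731441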
def heaps(arr: list) -> list:
    '''simple docstring'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
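# Worked example: heaps([1, 2, 3]) returns the six permutations in Heap's
# order: [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]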
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
def __init__( self , *__snake_case , **__snake_case):
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , __snake_case , )
super().__init__(*__snake_case , **__snake_case)
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]:
    '''Map a ParlAI state-dict key to its Hugging Face equivalent.'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCamelCase : List[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if k.startswith('encoder' ):
_UpperCamelCase : Optional[Any] = k.replace('.attn' , '.self_attn' )
_UpperCamelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
_UpperCamelCase : Any = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm3' , 'final_layer_norm' )
return k
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]:
    '''Rename the layernorm_embedding keys of the state dict to layer_norm, in place.'''
_UpperCamelCase : Union[str, Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
_UpperCamelCase : Optional[int] = sd.pop(UpperCAmelCase_ )
_UpperCamelCase : str = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
_UpperCamelCase : Tuple = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]:
    '''Convert a ParlAI Blenderbot checkpoint into a BlenderbotForConditionalGeneration and save it.'''
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : int = model['model']
_UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = m.model.state_dict().keys()
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCamelCase : int = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase_ )
m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
m.half()
m.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
lowerCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCAmelCase__ = """src/transformers"""
lowerCAmelCase__ = """docs/source/en"""
lowerCAmelCase__ = """."""
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] ) -> Dict:
    '''Return the text found between start_prompt and end_prompt in a file, along with its start/end line indices and all lines.'''
with open(UpperCAmelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_UpperCamelCase : Optional[Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(UpperCAmelCase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : int = start_index
while not lines[end_index].startswith(UpperCAmelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCAmelCase__ = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
lowerCAmelCase__ = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
lowerCAmelCase__ = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCAmelCase__ = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] ) -> List[str]:
    '''Split a camel-cased name into its component words.'''
_UpperCamelCase : Tuple = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , UpperCAmelCase_ )
return [m.group(0 ) for m in matches]
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] ) -> str:
    '''Center text in a column of the given width (the check-mark emojis count as width 2).'''
_UpperCamelCase : Union[str, Any] = 2 if text == '✅' or text == '❌' else len(UpperCAmelCase_ )
_UpperCamelCase : Any = (width - text_length) // 2
_UpperCamelCase : Any = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCamelCase_ ( ) -> List[Any]:
    '''Build the markdown table of model/framework support from the auto modules.'''
_UpperCamelCase : Dict = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : List[Any] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Optional[int] = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : Optional[Any] = collections.defaultdict(UpperCAmelCase_ )
_UpperCamelCase : List[str] = collections.defaultdict(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = collections.defaultdict(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = collections.defaultdict(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = collections.defaultdict(UpperCAmelCase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(UpperCAmelCase_ ):
_UpperCamelCase : int = None
if attr_name.endswith('Tokenizer' ):
_UpperCamelCase : Optional[int] = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
_UpperCamelCase : int = fast_tokenizers
_UpperCamelCase : Optional[Any] = attr_name[:-1_3]
elif _re_tf_models.match(UpperCAmelCase_ ) is not None:
_UpperCamelCase : Tuple = tf_models
_UpperCamelCase : Union[str, Any] = _re_tf_models.match(UpperCAmelCase_ ).groups()[0]
elif _re_flax_models.match(UpperCAmelCase_ ) is not None:
_UpperCamelCase : List[Any] = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(UpperCAmelCase_ ).groups()[0]
elif _re_pt_models.match(UpperCAmelCase_ ) is not None:
_UpperCamelCase : Any = pt_models
_UpperCamelCase : Optional[int] = _re_pt_models.match(UpperCAmelCase_ ).groups()[0]
if lookup_dict is not None:
while len(UpperCAmelCase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Optional[Any] = True
break
# Try again after removing the last word in the name
_UpperCamelCase : Any = ''.join(camel_case_split(UpperCAmelCase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Optional[Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : Any = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : List[Any] = [len(UpperCAmelCase_ ) + 2 for c in columns]
_UpperCamelCase : Optional[int] = max([len(UpperCAmelCase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Optional[Any] = '|' + '|'.join([_center_text(UpperCAmelCase_ , UpperCAmelCase_ ) for c, w in zip(UpperCAmelCase_ , UpperCAmelCase_ )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
_UpperCamelCase : str = {True: '✅', False: '❌'}
for name in model_names:
_UpperCamelCase : Tuple = model_name_to_prefix[name]
_UpperCamelCase : int = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(UpperCAmelCase_ , UpperCAmelCase_ ) for l, w in zip(UpperCAmelCase_ , UpperCAmelCase_ )] ) + "|\n"
return table
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int]=False ) -> Tuple:
    '''Check that the model table in index.md is up to date, optionally rewriting it.'''
_UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(UpperCAmelCase_ , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
_UpperCamelCase : Union[str, Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCAmelCase_ , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowercase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : List[Any] = tokenizer
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case)
_UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.tokenizer(__snake_case)
_UpperCamelCase : Dict = self.bert(**__snake_case)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
super().setUp()
_UpperCamelCase : Optional[Any] = [
BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_UpperCamelCase : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest')
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences)
_UpperCamelCase : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf.function(__snake_case)
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Optional[int] = tf.constant(__snake_case)
_UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case)
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case)
_UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences)
_UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCamelCase : int = Path(__snake_case) / 'saved.model'
model.save(__snake_case)
_UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case)
_UpperCamelCase : int = loaded_model(__snake_case)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ) -> str:
    '''Return the (old, new) key pairs mapping the original TrOCR encoder weights to the Hugging Face layout.'''
_UpperCamelCase : Union[str, Any] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple:
    '''Split each fused qkv projection weight of the encoder into separate query, key and value matrices.'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_UpperCamelCase : Dict = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
_UpperCamelCase : List[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
_UpperCamelCase : Optional[Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_UpperCamelCase : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Union[str, Any]:
    '''Move a value from the old key to the new key of the state dict.'''
_UpperCamelCase : Optional[Any] = dct.pop(UpperCAmelCase_ )
_UpperCamelCase : int = val
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> str:
    '''Download a sample image matching the checkpoint type (handwritten or printed).'''
if "handwritten" in checkpoint_url:
_UpperCamelCase : str = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_UpperCamelCase : List[Any] = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
_UpperCamelCase : Optional[Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert('RGB' )
return im
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) -> Optional[int]:
    '''Convert a TrOCR fairseq checkpoint into a VisionEncoderDecoder model and save it together with its processor.'''
_UpperCamelCase : Tuple = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_UpperCamelCase : Optional[Any] = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
_UpperCamelCase : Any = 1_0_2_4
_UpperCamelCase : Union[str, Any] = 4_0_9_6
_UpperCamelCase : int = 2_4
_UpperCamelCase : List[str] = 1_6
_UpperCamelCase : Optional[Any] = 1_0_2_4
else:
raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : List[Any] = 'relu'
_UpperCamelCase : List[Any] = 1_0_2_4
_UpperCamelCase : str = True
_UpperCamelCase : Dict = False
_UpperCamelCase : Any = False
# load HuggingFace model
_UpperCamelCase : str = ViTModel(UpperCAmelCase_ , add_pooling_layer=UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = TrOCRForCausalLM(UpperCAmelCase_ )
_UpperCamelCase : Dict = VisionEncoderDecoderModel(encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
model.eval()
# load state_dict of original model, rename some keys
_UpperCamelCase : int = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location='cpu' , check_hash=UpperCAmelCase_ )['model']
_UpperCamelCase : List[Any] = create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_UpperCamelCase : List[Any] = state_dict.pop(UpperCAmelCase_ )
if key.startswith('decoder' ) and "output_projection" not in key:
_UpperCamelCase : List[str] = val
else:
_UpperCamelCase : Optional[Any] = val
# load state dict
model.load_state_dict(UpperCAmelCase_ )
# Check outputs on an image
_UpperCamelCase : List[Any] = ViTImageProcessor(size=encoder_config.image_size )
_UpperCamelCase : Optional[Any] = RobertaTokenizer.from_pretrained('roberta-large' )
_UpperCamelCase : Optional[Any] = TrOCRProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Dict = processor(images=prepare_img(UpperCAmelCase_ ) , return_tensors='pt' ).pixel_values
# verify logits
_UpperCamelCase : int = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_UpperCamelCase : Tuple = model(pixel_values=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ )
_UpperCamelCase : Any = outputs.logits
_UpperCamelCase : Optional[Any] = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
_UpperCamelCase : Any = torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
_UpperCamelCase : Dict = torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
_UpperCamelCase : List[str] = torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
_UpperCamelCase : Optional[Any] = torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCAmelCase_ , atol=1e-3 ), "First elements of logits not as expected"
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCAmelCase__ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
from collections.abc import Iterator
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case):
_UpperCamelCase : Tuple = value
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case):
_UpperCamelCase : Optional[Any] = tree
def A__ ( self , __snake_case):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left) + self.depth_first_search(node.right)
)
def __iter__( self):
yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[Any] = embeddings_size
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : Dict = depths
_UpperCamelCase : str = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Tuple = len(__snake_case)
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Union[str, Any] = out_indices
_UpperCamelCase : int = num_groups
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = BitModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = self.num_labels
_UpperCamelCase : Dict = BitForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
_UpperCamelCase : Any = None
_UpperCamelCase : str = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def A__ ( self):
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Dict = BitModelTester(self)
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
@unittest.skip(reason='Bit does not output attentions')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
_UpperCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(config=__snake_case)
for name, module in model.named_modules():
                if isinstance(__snake_case , (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Any = layer_type
_UpperCamelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
@unittest.skip(reason='Bit does not use feedforward chunking')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> Optional[int]:
    '''Load the COCO sample image used by the integration tests.'''
_UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case)
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@require_torch
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = BitModelTester(self)
'''simple docstring'''
import cmath
import math
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ) -> complex:
    '''Return the apparent power as a complex number, given voltage and current magnitudes and their phase angles in degrees.'''
_UpperCamelCase : Dict = math.radians(UpperCAmelCase_ )
_UpperCamelCase : Tuple = math.radians(UpperCAmelCase_ )
# Convert voltage and current to rectangular form
_UpperCamelCase : Dict = cmath.rect(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = cmath.rect(UpperCAmelCase_ , UpperCAmelCase_ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_66_02_54])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] , UpperCAmelCase_ : int ) -> list[numpy.ndarray]:
    '''Apply the Koch snowflake iteration step the requested number of times to the initial vectors.'''
_UpperCamelCase : Tuple = initial_vectors
for _ in range(UpperCAmelCase_ ):
_UpperCamelCase : str = iteration_step(UpperCAmelCase_ )
return vectors
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
    '''Replace every edge of the polyline with the four-segment Koch motif.'''
_UpperCamelCase : int = []
for i, start_vector in enumerate(vectors[:-1] ):
_UpperCamelCase : Union[str, Any] = vectors[i + 1]
new_vectors.append(UpperCAmelCase_ )
_UpperCamelCase : Tuple = end_vector - start_vector
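        # add the Koch motif: points at 1/3 and 2/3 of the edge, plus the 60-degree outward peak between them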
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCamelCase_ ( UpperCAmelCase_ : numpy.ndarray , UpperCAmelCase_ : float ) -> numpy.ndarray:
    '''Rotate a 2-D vector counterclockwise by the given angle in degrees.'''
_UpperCamelCase : str = numpy.radians(UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = numpy.cos(UpperCAmelCase_ ), numpy.sin(UpperCAmelCase_ )
_UpperCamelCase : Any = numpy.array(((c, -s), (s, c)) )
return numpy.dot(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> None:
    '''Plot the polyline described by the vectors with matplotlib.'''
_UpperCamelCase : str = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_UpperCamelCase , _UpperCamelCase : Dict = zip(*UpperCAmelCase_ )
plt.plot(UpperCAmelCase_ , UpperCAmelCase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Union[str, Any]:
    '''Register the custom pytest markers used across the test suite.'''
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ) -> List[Any]:
    '''Add the shared command-line options to pytest.'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> List[Any]:
    '''Emit the extra test reports when --make-reports is set.'''
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCamelCase : str = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(UpperCAmelCase_ , id=UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ) -> Tuple:
    '''Treat an empty test collection (pytest exit status 5) as success.'''
if exitstatus == 5:
_UpperCamelCase : List[Any] = 0
# Doctest custom flag to ignore output.
lowerCAmelCase__ = doctest.register_optionflag("""IGNORE_RESULT""")
lowerCAmelCase__ = doctest.OutputChecker
class lowercase ( _lowercase ):
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case , __snake_case):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , __snake_case , __snake_case , __snake_case)
lowerCAmelCase__ = CustomOutputChecker
lowerCAmelCase__ = HfDoctestModule
lowerCAmelCase__ = HfDocTestParser
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case="resnet50" , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=True , __snake_case=True , ):
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : Dict = out_indices if out_indices is not None else [4]
_UpperCamelCase : Optional[Any] = stage_names
_UpperCamelCase : int = out_features
_UpperCamelCase : Any = backbone
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Union[str, Any] = image_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Any = use_pretrained_backbone
_UpperCamelCase : Dict = is_training
def A__ ( self):
_UpperCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : Optional[Any] = self.get_config()
return config, pixel_values
def A__ ( self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = TimmBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Any = model(__snake_case)
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def A__ ( self):
_UpperCamelCase : str = self.prepare_config_and_inputs()
_UpperCamelCase : int = config_and_inputs
_UpperCamelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (TimmBackbone,) if is_torch_available() else ()
a__ = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = TimmBackboneModelTester(self)
_UpperCamelCase : List[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case)
def A__ ( self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
_UpperCamelCase : int = 'resnet18'
_UpperCamelCase : Tuple = 'microsoft/resnet-18'
_UpperCamelCase : int = AutoBackbone.from_pretrained(__snake_case , use_timm_backbone=__snake_case)
_UpperCamelCase : List[Any] = AutoBackbone.from_pretrained(__snake_case)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
_UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(__snake_case , use_timm_backbone=__snake_case , out_indices=[1, 2, 3])
_UpperCamelCase : Dict = AutoBackbone.from_pretrained(__snake_case , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def A__ ( self):
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def A__ ( self):
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def A__ ( self):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def A__ ( self):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def A__ ( self):
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def A__ ( self):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def A__ ( self):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def A__ ( self):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def A__ ( self):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def A__ ( self):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def A__ ( self):
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def A__ ( self):
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def A__ ( self):
pass
@unittest.skip('Safetensors is not supported by timm.')
def A__ ( self):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : str = model_class(__snake_case)
_UpperCamelCase : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : int = [*signature.parameters.keys()]
_UpperCamelCase : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Dict = True
_UpperCamelCase : int = self.has_attentions
# no need to test all models as different heads yield the same functionality
_UpperCamelCase : Any = self.all_model_classes[0]
_UpperCamelCase : List[Any] = model_class(__snake_case)
model.to(__snake_case)
_UpperCamelCase : List[Any] = self._prepare_for_class(__snake_case , __snake_case)
_UpperCamelCase : Optional[Any] = model(**__snake_case)
_UpperCamelCase : int = outputs[0][-1]
# Encoder-/Decoder-only models
_UpperCamelCase : List[Any] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_UpperCamelCase : List[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__snake_case)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = model_class(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(**__snake_case)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
_UpperCamelCase : int = copy.deepcopy(__snake_case)
_UpperCamelCase : str = None
_UpperCamelCase : Optional[Any] = model_class(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(**__snake_case)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
_UpperCamelCase : Union[str, Any] = copy.deepcopy(__snake_case)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[int] = model_class(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(**__snake_case)
lowerCAmelCase__ = range(2, 2_0 + 1)
lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple:
    '''Advance the sequence from term i toward term n using memoised jumps keyed on the low k digits; return (value increase, terms advanced).'''
_UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) )
_UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) )
_UpperCamelCase , _UpperCamelCase : Dict = 0, 0
_UpperCamelCase : Optional[int] = n - i
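    # diff accumulates the increase of the current term; dn counts how many terms were advanced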
_UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ )
if sub_memo is not None:
_UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ )
if jumps is not None and len(UpperCAmelCase_ ) > 0:
# find and make the largest jump without going over
_UpperCamelCase : str = -1
for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_UpperCamelCase : Optional[Any] = _k
break
if max_jump >= 0:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
_UpperCamelCase : Tuple = diff + c
for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ):
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
if new_c > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
_UpperCamelCase : Union[str, Any] = []
else:
_UpperCamelCase : List[Any] = {c: []}
_UpperCamelCase : Optional[int] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_UpperCamelCase , _UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_UpperCamelCase , _UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
_UpperCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_UpperCamelCase : Union[str, Any] = 0
while j < len(UpperCAmelCase_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) )
return (diff, dn)
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict:
    '''Compute sequence terms one at a time, updating the digit list in place, until a carry reaches digit k or term n is hit; return (value increase, terms computed).'''
if i >= n:
return 0, i
if k > len(UpperCAmelCase_ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_UpperCamelCase : Any = i
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = 0, 0, 0
for j in range(len(UpperCAmelCase_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
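    # step the sequence one term at a time: each term grows by its digit sum, tracked as ds_b + ds_c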
while i < n:
i += 1
_UpperCamelCase : Union[str, Any] = ds_c + ds_b
diff += addend
_UpperCamelCase : Union[str, Any] = 0
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = a_i[j] + addend
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return diff, i - start_i
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict:
    '''Add addend into the little-endian digit list in place, starting at index k and propagating carries.'''
for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ):
_UpperCamelCase : List[str] = digits[j] + addend
if s >= 1_0:
_UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
_UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient
else:
_UpperCamelCase : Dict = s
_UpperCamelCase : Optional[Any] = addend // 1_0
if addend == 0:
break
while addend > 0:
_UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
digits.append(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int:
    '''Project Euler 551: return the n-th term of the sequence that starts at 1 and adds the digit sum of the previous term at each step.'''
_UpperCamelCase : Optional[Any] = [1]
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : int = 0
while True:
_UpperCamelCase , _UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ )
dn += terms_jumped
if dn == n - i:
break
_UpperCamelCase : str = 0
for j in range(len(UpperCAmelCase_ ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(f'{solution() = }')
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {"""vocab_file""": """spiece.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
lowerCAmelCase__ = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 3
lowerCAmelCase__ = 4
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = "left"
def __init__( self , __snake_case , __snake_case=False , __snake_case=True , __snake_case=False , __snake_case="<s>" , __snake_case="</s>" , __snake_case="<unk>" , __snake_case="<sep>" , __snake_case="<pad>" , __snake_case="<cls>" , __snake_case="<mask>" , __snake_case=["<eop>", "<eod>"] , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : str = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Any = 3
_UpperCamelCase : List[Any] = do_lower_case
_UpperCamelCase : int = remove_space
_UpperCamelCase : Union[str, Any] = keep_accents
_UpperCamelCase : List[str] = vocab_file
_UpperCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(__snake_case)
@property
def A__ ( self):
return len(self.sp_model)
    def A__ ( self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
def __getstate__( self):
_UpperCamelCase : Dict = self.__dict__.copy()
_UpperCamelCase : str = None
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Optional[Any] = {}
_UpperCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
    def A__ ( self , __snake_case):
        if self.remove_space:
            outputs = ' '.join(__snake_case.strip().split())
        else:
            outputs = __snake_case
        outputs = outputs.replace('``' , '"').replace('\'\'' , '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' , outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def A__ ( self , __snake_case):
        text = self.preprocess_text(__snake_case)
        pieces = self.sp_model.encode(text , out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
def A__ ( self , __snake_case):
return self.sp_model.PieceToId(__snake_case)
def A__ ( self , __snake_case):
return self.sp_model.IdToPiece(__snake_case)
    def A__ ( self , __snake_case):
        out_string = ''.join(__snake_case).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
    def A__ ( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ):
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer' , False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is not None:
return ([0] * len(__snake_case)) + [1] + ([0] * len(__snake_case)) + [1, 1]
return ([0] * len(__snake_case)) + [1, 1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : Tuple = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "vit_mae"
def __init__( self , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_24 , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=16 , __snake_case=5_12 , __snake_case=8 , __snake_case=20_48 , __snake_case=0.7_5 , __snake_case=False , **__snake_case , ):
super().__init__(**__snake_case)
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : int = image_size
_UpperCamelCase : Any = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = qkv_bias
_UpperCamelCase : str = decoder_num_attention_heads
_UpperCamelCase : Union[str, Any] = decoder_hidden_size
_UpperCamelCase : Union[str, Any] = decoder_num_hidden_layers
_UpperCamelCase : Any = decoder_intermediate_size
_UpperCamelCase : int = mask_ratio
_UpperCamelCase : List[Any] = norm_pix_loss
from math import ceil
def solution(n: int = 1001) -> int:
    '''simple docstring'''
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
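# Cross-check sketch (the helper name `naive_solution` is hypothetical): walk
# the rings of the usual Project Euler 28 spiral and sum the four corners of
# each ring explicitly instead of using the closed form above.
def naive_solution(n: int = 1001) -> int:
    total = 1
    for side in range(3, n + 1, 2):
        step = side - 1
        # corners of this ring: side**2, side**2 - step, ..., side**2 - 3 * step
        total += sum(side * side - k * step for k in range(4))
    return total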
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
import functools
def lowerCamelCase_ ( days : list[int] , costs : list[int] ) -> int:
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
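# Worked example (hypothetical values -- the classic "minimum cost for tickets"
# instance, not data from this file): a 7-day pass bought for day 1 covers days
# 1-7, so days [1, 4, 6, 7, 8, 20] with costs [2, 7, 15] cost 7 + 2 + 2 = 11.
# assert lowerCamelCase_([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11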
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = IFInpaintingSuperResolutionPipeline
a__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
a__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
a__ = PipelineTesterMixin.required_optional_params - {"latents"}
def A__ ( self):
return self._get_superresolution_dummy_components()
def A__ ( self , __snake_case , __snake_case=0):
if str(__snake_case).startswith('mps'):
_UpperCamelCase : List[str] = torch.manual_seed(__snake_case)
else:
_UpperCamelCase : Any = torch.Generator(device=__snake_case).manual_seed(__snake_case)
_UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(__snake_case)).to(__snake_case)
_UpperCamelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case)).to(__snake_case)
_UpperCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case)).to(__snake_case)
_UpperCamelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def A__ ( self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def A__ ( self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def A__ ( self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def A__ ( self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def A__ ( self):
self._test_save_load_local()
def A__ ( self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def A__ ( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
return torch.tensor(
UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , )
lowerCAmelCase__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
lowerCAmelCase__ = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any]=True ) -> Any:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) )
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = None
a__ = None
def A__ ( self , __snake_case , __snake_case):
with TemporaryDirectory() as tmp_dir:
_UpperCamelCase : List[Any] = dataset_module_factory(__snake_case , cache_dir=__snake_case)
_UpperCamelCase : int = import_main_class(dataset_module.module_path , dataset=__snake_case)
_UpperCamelCase : DatasetBuilder = builder_cls(
cache_dir=__snake_case , config_name=__snake_case , hash=dataset_module.hash , )
_UpperCamelCase : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__snake_case).replace(os.sep , '/'),
config.DATASET_INFO_FILENAME,
])
_UpperCamelCase : Tuple = cached_path(__snake_case , cache_dir=__snake_case)
self.assertTrue(os.path.exists(__snake_case))
@pytest.mark.integration
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Dict = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
_UpperCamelCase : Optional[Any] = dataset_module_factory('wikipedia' , cache_dir=UpperCAmelCase_ )
_UpperCamelCase : Tuple = import_main_class(dataset_module.module_path )
_UpperCamelCase : DatasetBuilder = builder_cls(
cache_dir=UpperCAmelCase_ , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_UpperCamelCase : Dict = None
builder_instance.download_and_prepare()
_UpperCamelCase : Optional[Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : Tuple = dataset_module_factory('wikipedia' , cache_dir=UpperCAmelCase_ )
_UpperCamelCase : Tuple = import_main_class(dataset_module.module_path , dataset=UpperCAmelCase_ )
_UpperCamelCase : DatasetBuilder = builder_cls(
cache_dir=UpperCAmelCase_ , config_name='20220301.frr' , hash=dataset_module.hash , )
_UpperCamelCase : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert "train" in ds
assert isinstance(ds['train'] , UpperCAmelCase_ )
assert next(iter(ds['train'] ) )
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
    def A__ ( self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def A__ ( self , __snake_case):
        return self.sp_model.encode(__snake_case , out_type=str)
    def A__ ( self , __snake_case):
        if __snake_case in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[__snake_case]
        spm_id = self.sp_model.PieceToId(__snake_case)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def A__ ( self , __snake_case):
        if __snake_case in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[__snake_case]
        return self.sp_model.IdToPiece(__snake_case - self.fairseq_offset)
    def A__ ( self , __snake_case):
        out_string = ''.join(__snake_case).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
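# Illustration of the fairseq/spm alignment above (a sketch, not part of the
# tokenizer): every spm piece id `i` maps to fairseq id `i + fairseq_offset`
# (i.e. `i + 1`), except for the four control tokens pinned in
# `fairseq_tokens_to_ids`; "<mask>" is appended after the spm vocabulary, so its
# id is `len(sp_model) + fairseq_offset` and `vocab_size` is one larger.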
from __future__ import annotations
def p_series(nth_term : int | float | str , power : int | float | str ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series : list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f'''1 / {pow(temp + 1 , int(power ) )}''' if series else '1' )
    return series
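# Example (using the reconstructed signature above): p_series(5, 2) returns
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'] -- the first five terms of the
# P-series with p = 2.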
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("""Enter the last number (nth term) of the P-Series"""))
    power = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
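# Usage sketch (hypothetical variable names): the processor fans inputs out to
# the two sub-processors and merges their outputs into one batch dict, e.g.
#   processor = lowercase(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
# so a multimodal model can then be called as model(**batch).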
from math import log2
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> int:
    '''simple docstring'''
    if not isinstance(UpperCAmelCase_ , int ):
        raise TypeError('Input value must be a \'int\' type' )
    if UpperCAmelCase_ < 0:
        raise ValueError('Input value must be a positive integer' )
    return 0 if (UpperCAmelCase_ == 0) else int(log2(UpperCAmelCase_ & -UpperCAmelCase_ ) )
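# Example (assuming the intent is the index of the lowest set bit): 36 is
# 0b100100, so 36 & -36 == 4 and the function returns int(log2(4)) == 2; any odd
# input has its lowest set bit at position 0, so the function returns 0.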
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
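# Instantiation sketch (illustrative values, not from the original file): with
# hidden_size=512 and the defaults above, attention_hidden_size falls back to
# 512 and intermediate_size to 4 * 512 = 2048, mirroring the two conditional
# fallbacks in __init__.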
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = TextToVideoSDPipeline
a__ = TEXT_TO_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a__ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def A__ ( self):
torch.manual_seed(0)
_UpperCamelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
_UpperCamelCase : List[str] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0)
_UpperCamelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0)
_UpperCamelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
_UpperCamelCase : Any = CLIPTextModel(__snake_case)
_UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_UpperCamelCase : Tuple = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def A__ ( self , __snake_case , __snake_case=0):
if str(__snake_case).startswith('mps'):
_UpperCamelCase : Optional[Any] = torch.manual_seed(__snake_case)
else:
_UpperCamelCase : Dict = torch.Generator(device=__snake_case).manual_seed(__snake_case)
_UpperCamelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def A__ ( self):
_UpperCamelCase : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : List[str] = self.get_dummy_components()
_UpperCamelCase : Optional[int] = TextToVideoSDPipeline(**__snake_case)
_UpperCamelCase : Any = sd_pipe.to(__snake_case)
sd_pipe.set_progress_bar_config(disable=__snake_case)
_UpperCamelCase : Any = self.get_dummy_inputs(__snake_case)
_UpperCamelCase : Dict = 'np'
_UpperCamelCase : List[Any] = sd_pipe(**__snake_case).frames
_UpperCamelCase : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_UpperCamelCase : Dict = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def A__ ( self):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__snake_case , expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def A__ ( self):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__snake_case , expected_max_diff=1e-2)
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
def A__ ( self):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
def A__ ( self):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
def A__ ( self):
pass
def A__ ( self):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
_UpperCamelCase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy')
_UpperCamelCase : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
_UpperCamelCase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_UpperCamelCase : Optional[int] = pipe.to('cuda')
_UpperCamelCase : List[str] = 'Spiderman is surfing'
_UpperCamelCase : str = torch.Generator(device='cpu').manual_seed(0)
_UpperCamelCase : Any = pipe(__snake_case , generator=__snake_case , num_inference_steps=25 , output_type='pt').frames
_UpperCamelCase : Union[str, Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
def A__ ( self):
_UpperCamelCase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy')
_UpperCamelCase : List[str] = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b')
_UpperCamelCase : List[str] = pipe.to('cuda')
_UpperCamelCase : List[Any] = 'Spiderman is surfing'
_UpperCamelCase : Tuple = torch.Generator(device='cpu').manual_seed(0)
_UpperCamelCase : int = pipe(__snake_case , generator=__snake_case , num_inference_steps=2 , output_type='pt').frames
_UpperCamelCase : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
_UpperCamelCase : List[Any] = self.model.config
_UpperCamelCase : Optional[int] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail'):
_UpperCamelCase : Tuple = int(__snake_case)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = outputs.logits
        _UpperCamelCase : Optional[Any] = torch.argmax(logits[:, self.entailment_id]).item()
return self._labels[label_id]
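# Usage sketch (hypothetical; the call signature is inherited from PipelineTool.__call__):
#   classifier = lowercase()
#   classifier("The new design is fantastic", labels=["positive", "negative"])  # -> "positive"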
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bloom"
a__ = ["past_key_values"]
a__ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self , __snake_case=25_08_80 , __snake_case=64 , __snake_case=2 , __snake_case=8 , __snake_case=1e-5 , __snake_case=0.0_2 , __snake_case=True , __snake_case=1 , __snake_case=2 , __snake_case=False , __snake_case=0.0 , __snake_case=0.0 , __snake_case=1 , __snake_case=False , **__snake_case , ):
_UpperCamelCase : Dict = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCamelCase : Dict = kwargs.pop('n_embed' , __snake_case)
_UpperCamelCase : Optional[int] = hidden_size if n_embed is None else n_embed
_UpperCamelCase : Optional[int] = n_layer
_UpperCamelCase : Tuple = n_head
_UpperCamelCase : Any = layer_norm_epsilon
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : Tuple = use_cache
_UpperCamelCase : Union[str, Any] = pretraining_tp
_UpperCamelCase : List[str] = apply_residual_connection_post_layernorm
_UpperCamelCase : Optional[int] = hidden_dropout
_UpperCamelCase : str = attention_dropout
_UpperCamelCase : List[str] = bos_token_id
_UpperCamelCase : Dict = eos_token_id
_UpperCamelCase : Optional[int] = slow_but_exact
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = version.parse("1.12" )
def __init__( self , __snake_case , __snake_case = "default" , __snake_case = None , __snake_case = False , ):
super().__init__(__snake_case , task=__snake_case , patching_specs=__snake_case , use_past=__snake_case)
if not getattr(self._config , 'pad_token_id' , __snake_case):
# TODO: how to do that better?
_UpperCamelCase : int = 0
@property
def A__ ( self):
_UpperCamelCase : List[Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__snake_case , direction='inputs' , inverted_values_shape=__snake_case)
_UpperCamelCase : int = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_UpperCamelCase : Dict = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def A__ ( self):
return self._config.n_layer
@property
def A__ ( self):
return self._config.n_head
@property
def A__ ( self):
return 1e-3
def A__ ( self , __snake_case , __snake_case = -1 , __snake_case = -1 , __snake_case = False , __snake_case = None , ):
_UpperCamelCase : int = super(__snake_case , self).generate_dummy_inputs(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case)
# We need to order the input in the way they appears in the forward()
_UpperCamelCase : Tuple = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
_UpperCamelCase : int = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_UpperCamelCase : List[str] = seqlen + 2
_UpperCamelCase : Union[str, Any] = self._config.hidden_size // self.num_attention_heads
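                # BLOOM fuses the batch and head dimensions: past keys use the layout
                # (batch * num_heads, head_dim, past_seq_len) while past values use
                # (batch * num_heads, past_seq_len, head_dim), hence the two shapes below.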
_UpperCamelCase : str = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_UpperCamelCase : List[Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_UpperCamelCase : List[Any] = [
(torch.zeros(__snake_case), torch.zeros(__snake_case)) for _ in range(self.num_layers)
]
_UpperCamelCase : Optional[Any] = common_inputs['attention_mask']
if self.use_past:
_UpperCamelCase : List[Any] = ordered_inputs['attention_mask'].dtype
_UpperCamelCase : Optional[int] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__snake_case , __snake_case , dtype=__snake_case)] , dim=1)
return ordered_inputs
@property
def A__ ( self):
return 13
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
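    # `_LazyModule` defers the heavy torch/TF imports declared above until an attribute
    # is first accessed, keeping the import of this subpackage itself cheap.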
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = BarthezTokenizer
a__ = BarthezTokenizerFast
a__ = True
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Tuple = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__snake_case)
_UpperCamelCase : Optional[Any] = tokenizer
def A__ ( self):
_UpperCamelCase : List[Any] = '<pad>'
_UpperCamelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(__snake_case) , 10_11_22)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22)
@require_torch
def A__ ( self):
_UpperCamelCase : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_UpperCamelCase : Tuple = [0, 57, 30_18, 7_03_07, 91, 2]
_UpperCamelCase : str = self.tokenizer(
__snake_case , max_length=len(__snake_case) , padding=__snake_case , truncation=__snake_case , return_tensors='pt')
self.assertIsInstance(__snake_case , __snake_case)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
_UpperCamelCase : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Union[str, Any] = self.get_tokenizer()
_UpperCamelCase : Dict = self.get_rust_tokenizer()
_UpperCamelCase : Optional[int] = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : int = tokenizer.tokenize(__snake_case)
_UpperCamelCase : str = rust_tokenizer.tokenize(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
_UpperCamelCase : str = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : List[str] = self.get_rust_tokenizer()
_UpperCamelCase : Any = tokenizer.encode(__snake_case)
_UpperCamelCase : Optional[int] = rust_tokenizer.encode(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Union[str, Any] = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCamelCase : Optional[int] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=__snake_case , )
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : list[float] ) -> float:
'''simple docstring'''
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
_UpperCamelCase : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(UpperCAmelCase_ ) )
return round(UpperCAmelCase_ , ndigits=2 )
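# Worked example (hypothetical figures): three yearly cash flows of 100.0 at a 5% rate.
# enumerate() starts at i=0, so the first cash flow is not discounted:
#   100/1.05**0 + 100/1.05**1 + 100/1.05**2 = 285.94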
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase__ = 5
lowerCAmelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCAmelCase__ = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = list(s_dict.keys() )
for key in keys:
_UpperCamelCase : Optional[int] = R'.*/layers_(\d+)'
_UpperCamelCase : int = key
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : List[str] = re.sub(R'layers_(\d+)' , R'block/\1/layer' , UpperCAmelCase_ )
_UpperCamelCase : Tuple = R'(encoder|decoder)\/'
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Optional[Any] = re.match(UpperCAmelCase_ , UpperCAmelCase_ ).groups()
if groups[0] == "encoder":
_UpperCamelCase : Tuple = re.sub(R'/mlp/' , R'/1/mlp/' , UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , UpperCAmelCase_ )
elif groups[0] == "decoder":
_UpperCamelCase : int = re.sub(R'/mlp/' , R'/2/mlp/' , UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , UpperCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_UpperCamelCase : Optional[int] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
print(F'''{key} -> {new_key}''' )
_UpperCamelCase : Union[str, Any] = s_dict.pop(UpperCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCamelCase : Optional[Any] = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCamelCase : Any = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_UpperCamelCase : Any = s_dict[key].shape[0]
_UpperCamelCase : Union[str, Any] = s_dict[key]
for idx in range(UpperCAmelCase_ ):
_UpperCamelCase : Any = expert_weihts[idx]
            print(F'''{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}''')
s_dict.pop(UpperCAmelCase_ )
return s_dict
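# Example of the rewrite performed above (hypothetical T5X key):
#   encoder/layers_0/attention/key/kernel -> encoder/block/0/layer/0/SelfAttention/k/kernel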
lowerCAmelCase__ = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
import regex as re
with open(UpperCAmelCase_ , 'r' ) as f:
_UpperCamelCase : str = f.read()
_UpperCamelCase : Optional[int] = re.findall(R'(.*) = ([0-9.]*)' , UpperCAmelCase_ )
_UpperCamelCase : int = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_UpperCamelCase : List[Any] = float(UpperCAmelCase_ ) if '.' in value else int(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = re.findall(R'(.*activations) = \(\'(.*)\',\)' , UpperCAmelCase_ )[0]
_UpperCamelCase : Union[str, Any] = str(activation[1] )
_UpperCamelCase : Any = num_experts
_UpperCamelCase : Optional[Any] = SwitchTransformersConfig(**UpperCAmelCase_ )
return config
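# Example gin fragment the two regexes above can parse (hypothetical):
#   NUM_HEADS = 12
#   HEAD_DIM = 64
#   dense.MlpBlock.activations = ('gelu',)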
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Dict="./" , UpperCAmelCase_ : Optional[int]=8 ) -> Optional[int]:
'''simple docstring'''
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
_UpperCamelCase : Tuple = checkpoints.load_tax_checkpoint(UpperCAmelCase_ )
if gin_file is not None:
_UpperCamelCase : str = convert_gin_to_config(UpperCAmelCase_ , UpperCAmelCase_ )
else:
_UpperCamelCase : Optional[Any] = SwitchTransformersConfig.from_pretrained(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = SwitchTransformersForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : Dict = flax_params['target']
_UpperCamelCase : str = flatten_dict(UpperCAmelCase_ , sep='/' )
_UpperCamelCase : Dict = rename_keys(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = unflatten_dict(UpperCAmelCase_ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase_ , UpperCAmelCase_ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
lowerCAmelCase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
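# Usage sketch (hypothetical): configure pruning that keeps the top-K weight scores
# per layer, with masks initialised to a constant:
#   config = lowercase(pruning_method='topK', mask_init='constant', mask_scale=0.0)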
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase__ = 4
lowerCAmelCase__ = 3
class lowercase ( _lowercase ):
"""simple docstring"""
pass
def lowerCamelCase_ ( UpperCAmelCase_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
yield {"i": i, "shard": shard}
def lowerCamelCase_ ( ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = int(os.environ['RANK'] )
_UpperCamelCase : str = int(os.environ['WORLD_SIZE'] )
_UpperCamelCase : List[str] = ArgumentParser()
    parser.add_argument('--streaming' , type=bool )
    parser.add_argument('--local_rank' , type=int )
    parser.add_argument('--num_workers' , type=int , default=0 )
_UpperCamelCase : Optional[Any] = parser.parse_args()
_UpperCamelCase : Optional[int] = args.streaming
_UpperCamelCase : int = args.num_workers
    _UpperCamelCase : List[str] = {'shards': [F'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS )]}
_UpperCamelCase : Tuple = IterableDataset.from_generator(UpperCAmelCase_ , gen_kwargs=UpperCAmelCase_ )
if not streaming:
_UpperCamelCase : List[Any] = Dataset.from_list(list(UpperCAmelCase_ ) )
_UpperCamelCase : int = split_dataset_by_node(UpperCAmelCase_ , rank=UpperCAmelCase_ , world_size=UpperCAmelCase_ )
_UpperCamelCase : Any = torch.utils.data.DataLoader(UpperCAmelCase_ , num_workers=UpperCAmelCase_ )
_UpperCamelCase : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
_UpperCamelCase : Union[str, Any] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
_UpperCamelCase : int = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''' )
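# This script is meant to be launched once per process by a distributed launcher,
# e.g. (hypothetical): torchrun --nproc_per_node=3 this_script.py --streaming False
# RANK and WORLD_SIZE are read from the environment variables set by the launcher.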
if __name__ == "__main__":
main()
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
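# The three scheduler configs above correspond, in order, to consistency-distillation
# ("cd"/test) checkpoints, ImageNet-64 consistency-training ("ct") checkpoints, and
# LSUN-256 consistency-training checkpoints; see the dispatch in the __main__ block below.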
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
    _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
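# The squeezes above drop trailing 1x1 spatial dims: the original checkpoint stores the
# attention projections as 1x1 convolutions, while diffusers expects plain linear weights.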
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
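# Hedged usage sketch (not part of the original script): the pipeline saved
# above can be reloaded for single-step sampling. The call signature below is
# assumed from the diffusers ConsistencyModelPipeline API.
# pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
# image = pipe(batch_size=1, num_inference_steps=1).images[0]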
| 648
| 0
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
| 704
|
def lowerCamelCase_ ( UpperCAmelCase_ : list ) -> list:
'''simple docstring'''
if len(UpperCAmelCase_ ) <= 1:
return [tuple(UpperCAmelCase_ )]
_UpperCamelCase : List[Any] = []
def generate(UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
_UpperCamelCase : Optional[int] = [0] * n
res.append(tuple(UpperCAmelCase_ ) )
_UpperCamelCase : List[Any] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[0]
else:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[c[i]]
res.append(tuple(UpperCAmelCase_ ) )
c[i] += 1
_UpperCamelCase : Tuple = 0
else:
_UpperCamelCase : Tuple = 0
i += 1
generate(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return res
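# For comparison, a minimal recursive sketch of the same technique (Heap's
# algorithm); `heaps_recursive` is an illustrative name, not part of the
# original file.
def heaps_recursive(arr: list) -> list:
    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            # as in the iterative version, the swap index depends on parity of k
            if k % 2 == 0:
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res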
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 648
| 0
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=30 , __snake_case=2 , __snake_case=3 , __snake_case=True , __snake_case=True , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=10 , __snake_case=0.0_2 , __snake_case=None , __snake_case=2 , ):
_UpperCamelCase : Dict = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : int = image_size
_UpperCamelCase : Optional[Any] = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : List[str] = use_labels
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : Optional[int] = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = type_sequence_label_size
_UpperCamelCase : Optional[int] = initializer_range
_UpperCamelCase : Dict = scope
_UpperCamelCase : List[str] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : Dict = (image_size // patch_size) ** 2
_UpperCamelCase : Dict = num_patches + 1
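# e.g. with the defaults above (image_size=30, patch_size=2) this gives
# (30 // 2) ** 2 = 225 patches and seq_length = 226 including the [CLS] token.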
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : int = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = ViTModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = ViTForMaskedImageModeling(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCamelCase : Dict = 1
_UpperCamelCase : Optional[Any] = ViTForMaskedImageModeling(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase : Union[str, Any] = model(__snake_case)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = self.type_sequence_label_size
_UpperCamelCase : Optional[int] = ViTForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_UpperCamelCase : Dict = 1
_UpperCamelCase : List[Any] = ViTForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase : Union[str, Any] = model(__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A__ ( self):
_UpperCamelCase : Any = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = config_and_inputs
_UpperCamelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
a__ = True
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Any = ViTModelTester(self)
_UpperCamelCase : Any = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__snake_case)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear))
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Tuple = model_class(__snake_case)
_UpperCamelCase : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[str] = [*signature.parameters.keys()]
_UpperCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Any = ViTModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def A__ ( self):
_UpperCamelCase : List[str] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(__snake_case)
_UpperCamelCase : Optional[int] = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : Optional[Any] = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : List[str] = model(**__snake_case)
# verify the logits
_UpperCamelCase : Optional[int] = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : int = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@slow
def A__ ( self):
# ViT models have an `interpolate_pos_encoding` argument in their forward
# method, which allows interpolating the pre-trained position embeddings so
# that the model can be used at higher resolutions. The DINO model by Facebook
# AI leverages this to visualize self-attention on higher-resolution images.
_UpperCamelCase : Tuple = ViTModel.from_pretrained('facebook/dino-vits8').to(__snake_case)
_UpperCamelCase : Optional[int] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_80)
_UpperCamelCase : Tuple = prepare_img()
_UpperCamelCase : Union[str, Any] = image_processor(images=__snake_case , return_tensors='pt')
_UpperCamelCase : str = inputs.pixel_values.to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(__snake_case , interpolate_pos_encoding=__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 36_01, 3_84))
self.assertEqual(outputs.last_hidden_state.shape , __snake_case)
_UpperCamelCase : Any = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def A__ ( self):
_UpperCamelCase : Any = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
_UpperCamelCase : Union[str, Any] = self.default_image_processor
_UpperCamelCase : Union[str, Any] = prepare_img()
_UpperCamelCase : Tuple = image_processor(images=__snake_case , return_tensors='pt')
_UpperCamelCase : List[Any] = inputs.pixel_values.to(__snake_case)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)
| 705
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCamelCase : List[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if k.startswith('encoder' ):
_UpperCamelCase : Optional[Any] = k.replace('.attn' , '.self_attn' )
_UpperCamelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
_UpperCamelCase : Any = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm3' , 'final_layer_norm' )
return k
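# Illustrative trace (hypothetical key, not from the original source): the
# PATTERNS pass maps "encoder.attention.q_lin.weight" to
# "encoder.attn.q_proj.weight", and the encoder branch then rewrites ".attn"
# to ".self_attn", yielding "encoder.self_attn.q_proj.weight".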
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
_UpperCamelCase : Optional[int] = sd.pop(UpperCAmelCase_ )
_UpperCamelCase : str = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
_UpperCamelCase : Tuple = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : int = model['model']
_UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = m.model.state_dict().keys()
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCamelCase : int = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase_ )
m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
m.half()
m.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
lowerCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 648
| 0
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str = "cpu" , UpperCAmelCase_ : Union[str, None] = None ) -> None:
'''simple docstring'''
_UpperCamelCase : Tuple = torch.load(UpperCAmelCase_ , map_location=UpperCAmelCase_ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(UpperCAmelCase_ , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
_UpperCamelCase : Dict = v.half()
if save_path is None: # overwrite src_path
_UpperCamelCase : str = src_path
torch.save(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
fire.Fire(convert)
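# A minimal in-memory sketch of the same conversion; the tensors and the helper
# name `_demo_fp16` are illustrative, not part of the original script. Every
# tensor in a state dict is cast to float16 via `.half()`.
def _demo_fp16() -> None:
    sd = {"layer.weight": torch.randn(4, 4)}
    fp16_sd = {k: v.half() for k, v in sd.items()}
    assert all(v.dtype == torch.float16 for v in fp16_sd.values())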
| 706
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowercase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : List[Any] = tokenizer
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case)
_UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.tokenizer(__snake_case)
_UpperCamelCase : Dict = self.bert(**__snake_case)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
super().setUp()
_UpperCamelCase : Optional[Any] = [
BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_UpperCamelCase : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest')
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences)
_UpperCamelCase : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf.function(__snake_case)
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Optional[int] = tf.constant(__snake_case)
_UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case)
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case)
_UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences)
_UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCamelCase : int = Path(__snake_case) / 'saved.model'
model.save(__snake_case)
_UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case)
_UpperCamelCase : int = loaded_model(__snake_case)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
| 648
| 0
|
from scipy.stats import spearmanr
import datasets
lowerCAmelCase__ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowerCAmelCase__ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowerCAmelCase__ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
"""simple docstring"""
def A__ ( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float'),
'references': datasets.Value('float'),
}) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : Any = spearmanr(__snake_case , __snake_case)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
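# Standalone sanity check mirroring the docstring example above:
# scipy's spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4]) returns a
# (correlation, p-value) pair whose correlation is -0.7.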
| 707
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = KandinskyVaaInpaintPipeline
a__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
a__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
a__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a__ = False
@property
def A__ ( self):
return 32
@property
def A__ ( self):
return 32
@property
def A__ ( self):
return self.time_input_dim
@property
def A__ ( self):
return self.time_input_dim * 4
@property
def A__ ( self):
return 1_00
@property
def A__ ( self):
torch.manual_seed(0)
_UpperCamelCase : Optional[Any] = {
'in_channels': 9,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_UpperCamelCase : Tuple = UNetaDConditionModel(**__snake_case)
return model
@property
def A__ ( self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self):
torch.manual_seed(0)
_UpperCamelCase : int = VQModel(**self.dummy_movq_kwargs)
return model
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.dummy_unet
_UpperCamelCase : List[str] = self.dummy_movq
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type='epsilon' , thresholding=__snake_case , )
_UpperCamelCase : str = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A__ ( self , __snake_case , __snake_case=0):
_UpperCamelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case)).to(__snake_case)
_UpperCamelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
__snake_case)
# create init_image
_UpperCamelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case)).to(__snake_case)
_UpperCamelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase : Dict = Image.fromarray(np.uinta(__snake_case)).convert('RGB').resize((2_56, 2_56))
# create mask
_UpperCamelCase : Tuple = np.ones((64, 64) , dtype=np.floataa)
_UpperCamelCase : int = 0
if str(__snake_case).startswith('mps'):
_UpperCamelCase : List[str] = torch.manual_seed(__snake_case)
else:
_UpperCamelCase : Tuple = torch.Generator(device=__snake_case).manual_seed(__snake_case)
_UpperCamelCase : Optional[Any] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def A__ ( self):
_UpperCamelCase : List[str] = 'cpu'
_UpperCamelCase : Union[str, Any] = self.get_dummy_components()
_UpperCamelCase : str = self.pipeline_class(**__snake_case)
_UpperCamelCase : int = pipe.to(__snake_case)
pipe.set_progress_bar_config(disable=__snake_case)
_UpperCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(__snake_case))
_UpperCamelCase : List[str] = output.images
_UpperCamelCase : Union[str, Any] = pipe(
**self.get_dummy_inputs(__snake_case) , return_dict=__snake_case , )[0]
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''')
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : List[Any] = np.array(
[0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def A__ ( self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
_UpperCamelCase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy')
_UpperCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
_UpperCamelCase : List[Any] = np.ones((7_68, 7_68) , dtype=np.floataa)
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : List[str] = 'a hat'
_UpperCamelCase : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa)
pipe_prior.to(__snake_case)
_UpperCamelCase : List[str] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa)
_UpperCamelCase : Union[str, Any] = pipeline.to(__snake_case)
pipeline.set_progress_bar_config(disable=__snake_case)
_UpperCamelCase : int = torch.Generator(device='cpu').manual_seed(0)
_UpperCamelCase : Union[str, Any] = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_UpperCamelCase : List[Any] = pipeline(
image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
_UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case)
| 708
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[Any] = embeddings_size
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : Dict = depths
_UpperCamelCase : str = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Tuple = len(__snake_case)
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Union[str, Any] = out_indices
_UpperCamelCase : int = num_groups
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = BitModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = self.num_labels
_UpperCamelCase : Dict = BitForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
_UpperCamelCase : Any = None
_UpperCamelCase : str = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def A__ ( self):
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Dict = BitModelTester(self)
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
@unittest.skip(reason='Bit does not output attentions')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
_UpperCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case)
def A__ ( self):
_UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(config=__snake_case)
for name, module in model.named_modules():
if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCamelCase : Any = layer_type
_UpperCamelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case)
@unittest.skip(reason='Bit does not use feedforward chunking')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case)
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@require_torch
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = BitModelTester(self)
| 648
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "glpn"
def __init__( self , __snake_case=3 , __snake_case=4 , __snake_case=[2, 2, 2, 2] , __snake_case=[8, 4, 2, 1] , __snake_case=[32, 64, 1_60, 2_56] , __snake_case=[7, 3, 3, 3] , __snake_case=[4, 2, 2, 2] , __snake_case=[1, 2, 5, 8] , __snake_case=[4, 4, 4, 4] , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=0.1 , __snake_case=1e-6 , __snake_case=64 , __snake_case=10 , __snake_case=-1 , **__snake_case , ):
super().__init__(**__snake_case)
_UpperCamelCase : Any = num_channels
_UpperCamelCase : Any = num_encoder_blocks
_UpperCamelCase : List[str] = depths
_UpperCamelCase : List[str] = sr_ratios
_UpperCamelCase : str = hidden_sizes
_UpperCamelCase : str = patch_sizes
_UpperCamelCase : Tuple = strides
_UpperCamelCase : Union[str, Any] = mlp_ratios
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Optional[int] = attention_probs_dropout_prob
_UpperCamelCase : int = initializer_range
_UpperCamelCase : List[Any] = drop_path_rate
_UpperCamelCase : Dict = layer_norm_eps
_UpperCamelCase : Dict = decoder_hidden_size
_UpperCamelCase : Union[str, Any] = max_depth
_UpperCamelCase : List[Any] = head_in_index
| 709
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase__ = numpy.array([0, 0])
lowerCAmelCase__ = numpy.array([0.5, 0.8_66_02_54])
lowerCAmelCase__ = numpy.array([1, 0])
lowerCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] , UpperCAmelCase_ : int ) -> list[numpy.ndarray]:
'''simple docstring'''
_UpperCamelCase : Tuple = initial_vectors
for _ in range(UpperCAmelCase_ ):
_UpperCamelCase : str = iteration_step(UpperCAmelCase_ )
return vectors
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
_UpperCamelCase : int = []
for i, start_vector in enumerate(vectors[:-1] ):
_UpperCamelCase : Union[str, Any] = vectors[i + 1]
new_vectors.append(UpperCAmelCase_ )
_UpperCamelCase : Tuple = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
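# Each edge (start_vector -> end_vector) is thus replaced by four shorter
# edges, so the vector list grows from 4 to 13 to 49 entries over the first
# iterations (3 * 4 + 1 = 13, 12 * 4 + 1 = 49).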
def lowerCamelCase_ ( UpperCAmelCase_ : numpy.ndarray , UpperCAmelCase_ : float ) -> numpy.ndarray:
'''simple docstring'''
_UpperCamelCase : str = numpy.radians(UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = numpy.cos(UpperCAmelCase_ ), numpy.sin(UpperCAmelCase_ )
_UpperCamelCase : Any = numpy.array(((c, -s), (s, c)) )
return numpy.dot(UpperCAmelCase_ , UpperCAmelCase_ )
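# Worked check (not part of the original file): rotating the unit x-vector by
# 90 degrees yields the unit y-vector, i.e.
# rotate(numpy.array([1, 0]), 90) is approximately numpy.array([0, 1]).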
def lowerCamelCase_ ( UpperCAmelCase_ : list[numpy.ndarray] ) -> None:
'''simple docstring'''
_UpperCamelCase : str = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_UpperCamelCase , _UpperCamelCase : Dict = zip(*UpperCAmelCase_ )
plt.plot(UpperCAmelCase_ , UpperCAmelCase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 648
| 0
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> Any:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , collections.abc.Iterable ):
return x
return (x, x)
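# e.g. to_atuple(224) -> (224, 224), while an iterable such as (224, 224) is
# returned unchanged.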
@require_tf
class lowercase :
"""simple docstring"""
def A__ ( self , __snake_case , __snake_case):
pass
def A__ ( self):
pass
def A__ ( self):
pass
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__snake_case , __snake_case)
_UpperCamelCase : int = TFVisionTextDualEncoderModel(__snake_case)
_UpperCamelCase : str = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase : Optional[Any] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : List[str] = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case)
_UpperCamelCase : str = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase : List[Any] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : List[str] = {'vision_model': vision_model, 'text_model': text_model}
_UpperCamelCase : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__snake_case)
_UpperCamelCase : List[Any] = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase : Union[str, Any] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : List[Any] = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case)
_UpperCamelCase : Optional[Any] = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
_UpperCamelCase : int = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__snake_case)
_UpperCamelCase : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(__snake_case)
_UpperCamelCase : Dict = model(input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case)
_UpperCamelCase : str = after_output[0].numpy()
_UpperCamelCase : Optional[int] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__snake_case , 1e-5)
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase : List[Any] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case)
_UpperCamelCase : Tuple = model(
input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , output_attentions=__snake_case)
_UpperCamelCase : str = output.vision_model_output.attentions
self.assertEqual(len(__snake_case) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : int = to_atuple(vision_model.config.image_size)
_UpperCamelCase : int = to_atuple(vision_model.config.patch_size)
_UpperCamelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCamelCase : Union[str, Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
_UpperCamelCase : List[Any] = output.text_model_output.attentions
self.assertEqual(len(__snake_case) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = np.abs((a - b)).max()
self.assertLessEqual(__snake_case , __snake_case , f'''Difference between torch and flax is {diff} (>= {tol}).''')
def A__ ( self):
_UpperCamelCase : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
self.check_save_load(**__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__snake_case)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.get_pretrained_model_and_inputs()
_UpperCamelCase : Optional[Any] = model_a(**__snake_case)
        out_1 = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__snake_case)
_UpperCamelCase : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(__snake_case)
_UpperCamelCase : str = model_a(**__snake_case)
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
_UpperCamelCase : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert')
_UpperCamelCase : List[Any] = 13
_UpperCamelCase : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
_UpperCamelCase : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
_UpperCamelCase : str = random_attention_mask([batch_size, 4])
_UpperCamelCase : Any = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : Tuple = TFViTModel(__snake_case , name='vision_model')
_UpperCamelCase : Dict = TFBertModel(__snake_case , name='text_model')
return vision_model, text_model
def A__ ( self):
_UpperCamelCase : Tuple = TFViTModelTester(self)
_UpperCamelCase : int = TFBertModelTester(self)
_UpperCamelCase : List[Any] = vit_model_tester.prepare_config_and_inputs()
_UpperCamelCase : Dict = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
_UpperCamelCase : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta')
_UpperCamelCase : Dict = 13
_UpperCamelCase : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
_UpperCamelCase : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
_UpperCamelCase : Tuple = random_attention_mask([batch_size, 4])
_UpperCamelCase : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case):
_UpperCamelCase : Optional[int] = self.get_vision_text_model(__snake_case , __snake_case)
_UpperCamelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=__snake_case , text_model=__snake_case)
_UpperCamelCase : List[str] = model(
            input_ids=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , output_attentions=True)
_UpperCamelCase : str = output.vision_model_output.attentions
self.assertEqual(len(__snake_case) , vision_config.num_hidden_layers)
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
_UpperCamelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCamelCase : str = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
_UpperCamelCase : Tuple = output.text_model_output.attentions
self.assertEqual(len(__snake_case) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : str = TFDeiTModel(__snake_case , name='vision_model')
_UpperCamelCase : Dict = TFRobertaModel(__snake_case , name='text_model')
return vision_model, text_model
def A__ ( self):
_UpperCamelCase : List[str] = TFDeiTModelTester(self)
_UpperCamelCase : int = TFRobertaModelTester(self)
_UpperCamelCase : int = vit_model_tester.prepare_config_and_inputs()
_UpperCamelCase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
_UpperCamelCase : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert')
_UpperCamelCase : List[Any] = 13
_UpperCamelCase : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
_UpperCamelCase : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
_UpperCamelCase : str = random_attention_mask([batch_size, 4])
_UpperCamelCase : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : Dict = TFCLIPVisionModel(__snake_case , name='vision_model')
_UpperCamelCase : Tuple = TFBertModel(__snake_case , name='text_model')
return vision_model, text_model
def A__ ( self):
_UpperCamelCase : int = TFCLIPVisionModelTester(self)
_UpperCamelCase : str = TFBertModelTester(self)
_UpperCamelCase : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
_UpperCamelCase : Tuple = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self):
_UpperCamelCase : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=True)
_UpperCamelCase : str = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
_UpperCamelCase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCamelCase : str = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=image , padding=True , return_tensors='np')
_UpperCamelCase : Union[str, Any] = model(**__snake_case)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCamelCase : List[Any] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1e-3))
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests were collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A stack ordered by its last (top) element."""
def __lt__( self , __snake_case):
return self[-1] < other[-1]
def __eq__( self , __snake_case):
return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    '''Sorts a collection in place using patience sort.

    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    '''
    stacks: list[Stack] = []
# sort into stacks
for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
# use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i, k, i, n):
    '''Makes the largest possible jump in the sequence written as a(i) = b * 10**k + c,
    reusing jumps cached by (digitsum(b), c); returns (value added, terms jumped).'''
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
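    # memo maps digitsum(b) -> {c: [(diff, terms_jumped, k), ...]}; jumps recorded for a
    # given (digitsum(b), c) pair are reused here (layout inferred from the caching below).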
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
diff += _diff
dn += terms_jumped
    jumps = sub_memo[c]
# keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
return (diff, dn)
def compute(a_i, k, i, n):
    '''Computes terms of the sequence one at a time from index i, updating a_i in place.'''
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
    start_i = i
    diff, ds_b, ds_c = 0, 0, 0
    for j in range(len(a_i)):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
        add(a_i, k, addend)
return diff, i - start_i
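# compute() advances the sequence one term at a time; next_term() falls back to it
# only at the lowest power of ten, where a cached jump would be too small to help.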
def add(digits, k, addend):
    '''Adds addend into the digit array digits, starting at index k.'''
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
if addend == 0:
break
while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    '''Computes a(n) for the sequence defined by a(1) = 1, a(i + 1) = a(i) + digitsum(a(i)).'''
    digits = [1]
    i = 1
    dn = 0
while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
dn += terms_jumped
if dn == n - i:
break
    a_n = 0
    for j in range(len(digits)):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(f'{solution() = }')
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Scales an image with nearest-neighbour interpolation."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError('Destination width/height should be > 0')
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )
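        # ratio_x and ratio_y map destination pixel coordinates back to the nearest source pixel.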
    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("""image_data/lena.jpg""", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MAE model."""

    model_type = "vit_mae"
def __init__( self , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_24 , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=16 , __snake_case=5_12 , __snake_case=8 , __snake_case=20_48 , __snake_case=0.7_5 , __snake_case=False , **__snake_case , ):
super().__init__(**__snake_case)
_UpperCamelCase : Optional[int] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : int = image_size
_UpperCamelCase : Any = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = qkv_bias
_UpperCamelCase : str = decoder_num_attention_heads
_UpperCamelCase : Union[str, Any] = decoder_hidden_size
_UpperCamelCase : Union[str, Any] = decoder_num_hidden_layers
_UpperCamelCase : Any = decoder_intermediate_size
_UpperCamelCase : int = mask_ratio
_UpperCamelCase : List[Any] = norm_pix_loss
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : str = num_channels
_UpperCamelCase : Optional[Any] = embeddings_size
_UpperCamelCase : Tuple = hidden_sizes
_UpperCamelCase : Dict = depths
_UpperCamelCase : str = is_training
_UpperCamelCase : Optional[int] = use_labels
_UpperCamelCase : str = hidden_act
_UpperCamelCase : Optional[int] = num_labels
_UpperCamelCase : Optional[int] = scope
_UpperCamelCase : Tuple = len(__snake_case)
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Union[str, Any] = out_indices
_UpperCamelCase : int = num_groups
def A__ ( self):
_UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels)
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = BitModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = self.num_labels
_UpperCamelCase : Dict = BitForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
        config.out_features = None
_UpperCamelCase : str = BitBackbone(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def A__ ( self):
_UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
_UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Dict = BitModelTester(self)
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case)
def A__ ( self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
return
@unittest.skip(reason='Bit does not output attentions')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
def A__ ( self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : int = model_class(__snake_case)
_UpperCamelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(config=__snake_case)
for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : str = self.model_tester.num_stages
self.assertEqual(len(__snake_case) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict , config , model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class)
@unittest.skip(reason='Bit does not use feedforward chunking')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def lowerCamelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def A__ ( self):
_UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case)
_UpperCamelCase : str = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__snake_case)
# verify the logits
_UpperCamelCase : Dict = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
@require_torch
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def A__ ( self):
_UpperCamelCase : List[str] = BitModelTester(self)
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    '''Minimum total cost to travel on every day in days, given 1-day, 7-day and
    30-day pass prices in costs.

    >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11
    '''
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)
@functools.cache
    def dynamic_programming(index: int) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    '''Reverses the order of words in a string.

    >>> reverse_words('I love Python')
    'Python love I'
    '''
    return ' '.join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device)
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii , jj , a , b)
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE) , msg=msg)
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    '''Number of occurrences of term in document, ignoring case and punctuation.'''
    document_without_punctuation = document.translate(
        str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
    tokenize_document = document_without_punctuation.split(' ' )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    '''Returns (number of newline-separated documents containing term, total documents).'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('' , '' , string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n' )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    '''Computes idf = log10(n / df), or the smoothed variant 1 + log10(n / (1 + df)).'''
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.' )
        return round(1 + log10(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
    return round(log10(n / df ) , 3 )
def tf_idf(tf: int, idf: int) -> float:
    '''tf-idf score, rounded to three decimal places.'''
    return round(tf * idf , 3 )
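# Example with a hypothetical two-document corpus:
#   df, n = document_frequency('python', 'python is great\njava is verbose')
#   idf = inverse_document_frequency(df, n)  # log10(2 / 1) = 0.301
#   score = tf_idf(term_frequency('python', 'python is great'), idf)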
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """Constructs an XLM-RoBERTa tokenizer, based on SentencePiece."""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
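        # A pair of sequences is thus formatted as <s> A </s></s> B </s>; a single
        # sequence as <s> A </s>.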
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
_UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : str = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self , tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    '''Removes top-level checkpoint entries that are not model weights.'''
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : str = list(s_dict.keys() )
for key in keys:
_UpperCamelCase : Tuple = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_UpperCamelCase : List[str] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
print(F'''{key} -> {new_key}''' )
_UpperCamelCase : int = s_dict.pop(UpperCAmelCase_ )
return s_dict
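# For example, under WHISPER_MAPPING above the original key
# 'decoder.blocks.0.mlp.0.weight' becomes 'decoder.layers.0.fc1.weight'
# ('blocks' -> 'layers', then 'mlp.0' -> 'fc1') before the state dict is loaded.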
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = emb.weight.shape
_UpperCamelCase : Tuple = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_ )
_UpperCamelCase : Any = emb.weight.data
return lin_layer
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> bytes:
'''simple docstring'''
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = os.path.basename(UpperCAmelCase_ )
_UpperCamelCase : Tuple = url.split('/' )[-2]
_UpperCamelCase : Any = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
if os.path.exists(UpperCAmelCase_ ) and not os.path.isfile(UpperCAmelCase_ ):
raise RuntimeError(F'''{download_target} exists and is not a regular file''' )
if os.path.isfile(UpperCAmelCase_ ):
_UpperCamelCase : List[Any] = open(UpperCAmelCase_ , 'rb' ).read()
if hashlib.shaaaa(UpperCAmelCase_ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(UpperCAmelCase_ ) as source, open(UpperCAmelCase_ , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=8_0 , unit='iB' , unit_scale=UpperCAmelCase_ , unit_divisor=1_0_2_4 ) as loop:
while True:
_UpperCamelCase : Optional[Any] = source.read(8_1_9_2 )
if not buffer:
break
output.write(UpperCAmelCase_ )
loop.update(len(UpperCAmelCase_ ) )
_UpperCamelCase : Union[str, Any] = open(UpperCAmelCase_ , 'rb' ).read()
if hashlib.shaaaa(UpperCAmelCase_ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
return model_bytes
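# A minimal usage sketch (hypothetical cache directory; the second-to-last URL
# path segment doubles as the expected SHA-256 digest verified above):
#     model_bytes = _download(_MODELS['tiny'], os.path.expanduser('~/.cache/whisper'))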
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if ".pt" not in checkpoint_path:
_UpperCamelCase : List[Any] = _download(_MODELS[checkpoint_path] )
else:
_UpperCamelCase : Dict = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : str = original_checkpoint['dims']
_UpperCamelCase : Dict = original_checkpoint['model_state_dict']
_UpperCamelCase : Optional[int] = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(UpperCAmelCase_ )
rename_keys(UpperCAmelCase_ )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = state_dict['decoder.layers.0.fc1.weight'].shape[0]
_UpperCamelCase : Any = WhisperConfig(
vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=UpperCAmelCase_ , decoder_ffn_dim=UpperCAmelCase_ , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
_UpperCamelCase : Any = WhisperForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0 and not set(UpperCAmelCase_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
_UpperCamelCase : str = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_UpperCamelCase : str = proj_out_weights
model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCAmelCase__ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
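# Example invocation (hypothetical script and output names; a bare size alias
# such as 'tiny' is resolved through the _MODELS table above, while any path
# containing '.pt' is loaded from disk):
#     python convert_openai_whisper.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny-hf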
| 716
|
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
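# A minimal usage sketch (hypothetical inputs; the class above is named
# TvltProcessor upstream, and the returned dict merges the image-processor and
# feature-extractor outputs exactly as built in __call__ above):
#     processor = TvltProcessor(image_processor, feature_extractor)
#     batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)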
| 648
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
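# A minimal usage sketch (hypothetical sizes, upstream keyword names; the class
# above is RwkvConfig upstream):
#     config = RwkvConfig(vocab_size=50277, context_length=2048)
#     assert config.max_position_embeddings == 2048  # routed through attribute_map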
| 648
| 0
|
import torch
from transformers import AutoModel
class lowercase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case="sayef/fsner-bert-base-uncased"):
super(__snake_case , self).__init__()
_UpperCamelCase : Any = AutoModel.from_pretrained(__snake_case , return_dict=__snake_case)
_UpperCamelCase : List[Any] = torch.nn.CosineSimilarity(3 , 1e-08)
_UpperCamelCase : Tuple = torch.nn.Softmax(dim=1)
def A__ ( self , **__snake_case):
return self.bert(**__snake_case).last_hidden_state
def A__ ( self , __snake_case):
return token_embeddings.sum(2 , keepdim=__snake_case)
def A__ ( self , __snake_case , __snake_case , __snake_case=1):
return self.softmax(T * self.cos(__snake_case , __snake_case))
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = W_supports['sizes'].tolist()
_UpperCamelCase : Dict = W_supports['start_token_id'].item()
_UpperCamelCase : str = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : Optional[Any] = self.BERT(**__snake_case)
_UpperCamelCase : List[str] = self.BERT(**__snake_case)
_UpperCamelCase : Union[str, Any] = None
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = W_supports['input_ids'] == start_token_id
_UpperCamelCase : Optional[Any] = W_supports['input_ids'] == end_token_id
for i, size in enumerate(__snake_case):
if i == 0:
_UpperCamelCase : str = 0
else:
_UpperCamelCase : Dict = support_sizes[i - 1]
_UpperCamelCase : Optional[Any] = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : Union[str, Any] = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T).sum(1).softmax(0)
_UpperCamelCase : List[str] = torch.matmul(q[i] , s_end.T).sum(1).softmax(0)
if p_starts is not None:
_UpperCamelCase : List[Any] = torch.vstack((p_starts, p_start))
_UpperCamelCase : Any = torch.vstack((p_ends, p_end))
else:
_UpperCamelCase : int = p_start
_UpperCamelCase : Tuple = p_end
return p_starts, p_ends
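# A minimal usage sketch (hypothetical tensors; W_query and W_supports are
# tokenized query/support batches, with 'sizes', 'start_token_id' and
# 'end_token_id' attached to the support dict as consumed above):
#     p_starts, p_ends = model(W_query, W_supports)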
| 718
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
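# A minimal sketch of how the dynamic axes above are consumed (upstream class
# names are BertConfig / BertOnnxConfig):
#     onnx_config = BertOnnxConfig(BertConfig())
#     onnx_config.inputs  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])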
| 648
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
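# A minimal sketch of the effect (assuming the package is importable as
# transformers.models.bloom): `from transformers.models.bloom import BloomConfig`
# resolves lazily on first attribute access, so the heavy torch-backed modules
# above are never imported until something actually uses them.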
| 719
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
_UpperCamelCase : List[Any] = self.model.config
_UpperCamelCase : Optional[int] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail'):
_UpperCamelCase : Tuple = int(__snake_case)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = outputs.logits
_UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
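# A minimal usage sketch (hypothetical labels; the class above is
# TextClassificationTool upstream, and PipelineTool instances are callable):
#     tool = TextClassificationTool()
#     label = tool('This movie was great', ['positive', 'negative'])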
| 648
| 0
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=32 , __snake_case=2 , __snake_case=3 , __snake_case=16 , __snake_case=[1, 2, 1] , __snake_case=[2, 2, 4] , __snake_case=2 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=True , __snake_case=0.0_2 , __snake_case=1e-5 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=10 , __snake_case=8 , ):
_UpperCamelCase : int = parent
_UpperCamelCase : Tuple = batch_size
_UpperCamelCase : Optional[Any] = image_size
_UpperCamelCase : List[str] = patch_size
_UpperCamelCase : Tuple = num_channels
_UpperCamelCase : Union[str, Any] = embed_dim
_UpperCamelCase : Dict = depths
_UpperCamelCase : Tuple = num_heads
_UpperCamelCase : Dict = window_size
_UpperCamelCase : List[Any] = mlp_ratio
_UpperCamelCase : Optional[Any] = qkv_bias
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Optional[Any] = drop_path_rate
_UpperCamelCase : int = hidden_act
_UpperCamelCase : List[Any] = use_absolute_embeddings
_UpperCamelCase : Tuple = patch_norm
_UpperCamelCase : str = layer_norm_eps
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : List[str] = scope
_UpperCamelCase : int = use_labels
_UpperCamelCase : List[str] = type_sequence_label_size
_UpperCamelCase : List[Any] = encoder_stride
def A__ ( self):
_UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase : Optional[Any] = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : List[str] = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = SwinvaModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Any = model(__snake_case)
_UpperCamelCase : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_UpperCamelCase : Tuple = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = SwinvaForMaskedImageModeling(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Tuple = SwinvaForMaskedImageModeling(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase : Dict = model(__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def A__ ( self , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Union[str, Any] = self.type_sequence_label_size
_UpperCamelCase : List[str] = SwinvaForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Union[str, Any] = model(__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCamelCase : List[str] = config_and_inputs
_UpperCamelCase : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
a__ = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Optional[Any] = SwinvaModelTester(self)
_UpperCamelCase : Dict = ConfigTester(self , config_class=__snake_case , embed_dim=37)
def A__ ( self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
def A__ ( self):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
def A__ ( self):
pass
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = model_class(__snake_case)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear))
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(__snake_case)
_UpperCamelCase : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : int = [*signature.parameters.keys()]
_UpperCamelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[Any] = True
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : str = True
_UpperCamelCase : Optional[int] = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : List[Any] = outputs.attentions
_UpperCamelCase : Tuple = len(self.model_tester.depths)
self.assertEqual(len(__snake_case) , __snake_case)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCamelCase : str = True
_UpperCamelCase : List[Any] = config.window_size**2
_UpperCamelCase : str = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : int = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : str = outputs.attentions
self.assertEqual(len(__snake_case) , __snake_case)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_UpperCamelCase : Optional[int] = len(__snake_case)
# Check attention is always last and order is fine
_UpperCamelCase : int = True
_UpperCamelCase : Any = True
_UpperCamelCase : int = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Dict = model(**self._prepare_for_class(__snake_case , __snake_case))
if hasattr(self.model_tester , 'num_hidden_states_types'):
_UpperCamelCase : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_UpperCamelCase : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(__snake_case))
_UpperCamelCase : Tuple = outputs.attentions
self.assertEqual(len(__snake_case) , __snake_case)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Dict = model_class(__snake_case)
model.to(__snake_case)
model.eval()
with torch.no_grad():
_UpperCamelCase : Any = model(**self._prepare_for_class(__snake_case , __snake_case))
_UpperCamelCase : int = outputs.hidden_states
_UpperCamelCase : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(__snake_case) , __snake_case)
# Swinv2 has a different seq_length
_UpperCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_UpperCamelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
_UpperCamelCase : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(__snake_case) , __snake_case)
_UpperCamelCase : List[Any] = reshaped_hidden_states[0].shape
_UpperCamelCase : Tuple = (
reshaped_hidden_states[0].view(__snake_case , __snake_case , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Optional[Any] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCamelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_UpperCamelCase : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCamelCase : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCamelCase : List[Any] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : List[str] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width))
def A__ ( self):
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
@slow
def A__ ( self):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Tuple = SwinvaModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : str = _config_zero_init(__snake_case)
for model_class in self.all_model_classes:
_UpperCamelCase : List[str] = model_class(config=__snake_case)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
if is_vision_available()
else None
)
@slow
def A__ ( self):
_UpperCamelCase : Dict = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
__snake_case)
_UpperCamelCase : Any = self.default_image_processor
_UpperCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCamelCase : Dict = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
_UpperCamelCase : int = model(**__snake_case)
# verify the logits
_UpperCamelCase : int = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __snake_case)
_UpperCamelCase : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4))
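# To run only this file (hypothetical repo-relative path; the integration test
# needs torch, the vision extras and network access for the pretrained checkpoint):
#     pytest tests/models/swinv2/test_modeling_swinv2.py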
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 3_2
def lowerCamelCase_ ( UpperCAmelCase_ : Accelerator , UpperCAmelCase_ : int = 1_6 , UpperCAmelCase_ : str = "bert-base-cased" ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCAmelCase_ : Any ):
# max_length=None => use the model max length (it's actually the default)
_UpperCamelCase : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCamelCase : Any = datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCamelCase : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCAmelCase_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase_ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_UpperCamelCase : Any = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
_UpperCamelCase : List[str] = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
return train_dataloader, eval_dataloader
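# A minimal usage sketch (mirrors the positional call inside training_function
# below):
#     train_dataloader, eval_dataloader = get_dataloaders(accelerator, 16, 'bert-base-cased')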
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCamelCase : int = 0
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCamelCase : List[Any] = model(**UpperCAmelCase_ )
_UpperCamelCase : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_UpperCamelCase : int = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_UpperCamelCase : str = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , )
_UpperCamelCase : Optional[Any] = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase : Any = config['lr']
_UpperCamelCase : List[str] = int(config['num_epochs'] )
_UpperCamelCase : Optional[int] = int(config['seed'] )
_UpperCamelCase : List[Any] = int(config['batch_size'] )
_UpperCamelCase : Optional[Any] = args.model_name_or_path
set_seed(UpperCAmelCase_ )
_UpperCamelCase : List[str] = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
# Instantiate optimizer
_UpperCamelCase : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCamelCase : Dict = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
_UpperCamelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = (len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCamelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase_ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase_ , )
else:
_UpperCamelCase : List[Any] = DummyScheduler(UpperCAmelCase_ , total_num_steps=UpperCAmelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase : Any = accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
_UpperCamelCase : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Any = evaluate.load('glue' , 'mrpc' )
_UpperCamelCase : int = num_epochs
if args.partial_train_epoch is not None:
_UpperCamelCase : int = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_UpperCamelCase : str = args.resume_from_checkpoint.split('epoch_' )[1]
_UpperCamelCase : List[str] = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_UpperCamelCase : Any = int(UpperCAmelCase_ ) + 1
_UpperCamelCase : Optional[Any] = evaluation_loop(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
accelerator.print('resumed checkpoint performance:' , UpperCAmelCase_ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizer\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , 'r' ) as f:
_UpperCamelCase : Dict = json.load(UpperCAmelCase_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_UpperCamelCase : int = {}
for epoch in range(UpperCAmelCase_ , UpperCAmelCase_ ):
model.train()
for step, batch in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : Optional[Any] = model(**UpperCAmelCase_ )
_UpperCamelCase : int = outputs.loss
_UpperCamelCase : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_UpperCamelCase : int = F'''epoch_{epoch}'''
_UpperCamelCase : Tuple = os.path.join(args.output_dir , UpperCAmelCase_ )
accelerator.save_state(UpperCAmelCase_ )
_UpperCamelCase : Any = evaluation_loop(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : int = accuracy
_UpperCamelCase : Tuple = lr_scheduler.get_lr()[0]
_UpperCamelCase : int = optimizer.param_groups[0]['lr']
_UpperCamelCase : int = epoch
_UpperCamelCase : List[Any] = overall_step
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , 'w' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( ) -> Any:
'''simple docstring'''
_UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase_ , default=2 , help='Number of train epochs.' , )
_UpperCamelCase : Optional[int] = parser.parse_args()
_UpperCamelCase : Union[str, Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
training_function(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
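# Example invocations (hypothetical script name and paths):
#     python test_checkpointing.py --output_dir ./ckpts --num_epochs 2
#     python test_checkpointing.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0
# The second run reloads model/optimizer/scheduler state and verifies it against
# the metrics recorded in state_0.json before returning.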
| 721
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 648
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowercase :
"""simple docstring"""
a__ = BlenderbotSmallConfig
a__ = {}
a__ = "gelu"
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=False , __snake_case=99 , __snake_case=32 , __snake_case=2 , __snake_case=4 , __snake_case=37 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=20 , __snake_case=2 , __snake_case=1 , __snake_case=0 , ):
_UpperCamelCase : List[str] = parent
_UpperCamelCase : List[Any] = batch_size
_UpperCamelCase : Any = seq_length
_UpperCamelCase : Tuple = is_training
_UpperCamelCase : List[str] = use_labels
_UpperCamelCase : Dict = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : Optional[Any] = num_attention_heads
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCamelCase : int = max_position_embeddings
_UpperCamelCase : Optional[int] = eos_token_id
_UpperCamelCase : List[Any] = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
def A__ ( self):
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
_UpperCamelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1)
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase : Any = prepare_blenderbot_small_inputs_dict(__snake_case , __snake_case , __snake_case)
return config, inputs_dict
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : Tuple = TFBlenderbotSmallModel(config=__snake_case).get_decoder()
_UpperCamelCase : Dict = inputs_dict['input_ids']
_UpperCamelCase : Optional[int] = input_ids[:1, :]
_UpperCamelCase : List[Any] = inputs_dict['attention_mask'][:1, :]
_UpperCamelCase : Union[str, Any] = inputs_dict['head_mask']
_UpperCamelCase : Tuple = 1
# first forward pass
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , head_mask=__snake_case , use_cache=__snake_case)
_UpperCamelCase : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size)
_UpperCamelCase : Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
_UpperCamelCase : Any = tf.concat([input_ids, next_tokens] , axis=-1)
_UpperCamelCase : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1)
_UpperCamelCase : Union[str, Any] = model(__snake_case , attention_mask=__snake_case)[0]
_UpperCamelCase : Union[str, Any] = model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
_UpperCamelCase : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1]))
_UpperCamelCase : int = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__snake_case , __snake_case , rtol=1e-3)
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Dict=None , ) -> Any:
'''simple docstring'''
if attention_mask is None:
_UpperCamelCase : Optional[int] = tf.cast(tf.math.not_equal(UpperCAmelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_UpperCamelCase : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCamelCase : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
a__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
a__ = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
a__ = True
a__ = False
a__ = False
def A__ ( self):
_UpperCamelCase : Tuple = TFBlenderbotSmallModelTester(self)
_UpperCamelCase : Any = ConfigTester(self , config_class=__snake_case)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__snake_case)
@require_tokenizers
@require_tf
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
a__ = "facebook/blenderbot_small-90M"
@cached_property
def A__ ( self):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
@cached_property
def A__ ( self):
_UpperCamelCase : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def A__ ( self):
_UpperCamelCase : Optional[int] = self.tokenizer(self.src_text , return_tensors='tf')
_UpperCamelCase : List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__snake_case , )
_UpperCamelCase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__snake_case)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 700
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase__ = 5
lowerCAmelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = SpeechaTextTokenizer
a__ = False
a__ = True
def A__ ( self):
super().setUp()
_UpperCamelCase : Any = sp.SentencePieceProcessor()
spm_model.Load(__snake_case)
_UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))]
_UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case))))
_UpperCamelCase : Tuple = Path(self.tmpdirname)
save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file'])
_UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
_UpperCamelCase : str = '<pad>'
_UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(__snake_case) , 10_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
def A__ ( self):
_UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_UpperCamelCase : List[str] = tokenizer.tokenize('This is a test')
self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , )
_UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case)
self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
a__ = "valhalla/s2t_mustc_multilinguial_medium"
a__ = "C'est trop cool"
a__ = "Esto es genial"
@classmethod
def A__ ( cls):
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def A__ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def A__ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def A__ ( self):
self.assertIn(__snake_case , self.tokenizer.all_special_ids)
_UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
_UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case)
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case)
self.assertEqual(__snake_case , __snake_case)
self.assertNotIn(self.tokenizer.eos_token , __snake_case)
def A__ ( self):
_UpperCamelCase : Any = 'fr'
_UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , __snake_case)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def A__ ( self):
_UpperCamelCase : Union[str, Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
_UpperCamelCase : List[str] = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
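# A hedged aside: the multilingual tests above depend on the tokenizer
# prepending a language-code token and appending EOS. A toy sketch of that
# framing step (every id below is made up for illustration):
LANG_CODE_TO_ID = {'fr': 8, 'es': 10}
EOS_ID = 2

def frame_ids(token_ids, tgt_lang):
    # prefix with the target-language code, suffix with end-of-sequence
    return [LANG_CODE_TO_ID[tgt_lang]] + token_ids + [EOS_ID]

framed = frame_ids([4, 16_01, 47, 76_47], 'es')
assert framed[0] == LANG_CODE_TO_ID['es'] and framed[-1] == EOS_ID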
| 648
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
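# A hedged aside: the module above defers heavy framework imports until first
# attribute access via transformers' _LazyModule. A dependency-free sketch of
# the same idea with importlib (class and attribute names are illustrative):
import importlib

class LazyAttr:
    def __init__(self, module_name, attr_name):
        self._module_name = module_name
        self._attr_name = attr_name
        self._resolved = None

    def resolve(self):
        # the import happens on first use, not at definition time
        if self._resolved is None:
            module = importlib.import_module(self._module_name)
            self._resolved = getattr(module, self._attr_name)
        return self._resolved

lazy_sqrt = LazyAttr('math', 'sqrt')
assert lazy_sqrt.resolve()(9.0) == 3.0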
| 701
|
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
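# A hedged aside: the config above layers pruning-specific fields on top of
# PretrainedConfig. A framework-free sketch of the same subclassing pattern
# (both class names below are illustrative):
class BaseConfig:
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

class PrunedConfig(BaseConfig):
    model_type = 'masked_bert'

    def __init__(self, pruning_method='topK', mask_init='constant', mask_scale=0.0, **kwargs):
        super().__init__(**kwargs)
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale

cfg = PrunedConfig(hidden_size=7_68, pruning_method='threshold')
assert cfg.hidden_size == 7_68 and cfg.pruning_method == 'threshold'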
| 648
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 3_2
def lowerCamelCase_ ( UpperCAmelCase_ : Accelerator , UpperCAmelCase_ : int = 1_6 , UpperCAmelCase_ : str = "bert-base-cased" ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCAmelCase_ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
_UpperCamelCase : Optional[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCamelCase : int = datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase_ )
    # We also rename the 'label' column to 'labels', which is the name the
    # models of the transformers library expect for labels
_UpperCamelCase : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCAmelCase_ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase_ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_UpperCamelCase : List[str] = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
_UpperCamelCase : Any = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
_UpperCamelCase : int = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase : int = config['lr']
_UpperCamelCase : Union[str, Any] = int(config['num_epochs'] )
_UpperCamelCase : Optional[int] = int(config['seed'] )
_UpperCamelCase : Dict = int(config['batch_size'] )
_UpperCamelCase : Tuple = args.model_name_or_path
set_seed(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_UpperCamelCase : Any = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
# Instantiate optimizer
_UpperCamelCase : List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCamelCase : int = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
_UpperCamelCase : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Union[str, Any] = (len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCamelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase_ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase_ , )
else:
_UpperCamelCase : Any = DummyScheduler(UpperCAmelCase_ , total_num_steps=UpperCAmelCase_ , warmup_num_steps=0 )
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects
    # in the same order we gave them to the prepare method.
_UpperCamelCase : str = accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
_UpperCamelCase : int = 0
    # We also need to keep track of the starting epoch so files are named properly
_UpperCamelCase : Union[str, Any] = 0
# Now we train the model
_UpperCamelCase : str = evaluate.load('glue' , 'mrpc' )
_UpperCamelCase : Dict = 0
_UpperCamelCase : List[str] = {}
for epoch in range(UpperCAmelCase_ , UpperCAmelCase_ ):
model.train()
for step, batch in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = model(**UpperCAmelCase_ )
_UpperCamelCase : List[str] = outputs.loss
_UpperCamelCase : Any = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_UpperCamelCase : Optional[int] = 0
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCamelCase : List[str] = model(**UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
_UpperCamelCase : str = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_UpperCamelCase : Tuple = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , )
_UpperCamelCase : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase_ )
_UpperCamelCase : Any = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
_UpperCamelCase : Optional[Any] = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
        '--performance_lower_bound' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase_ , default=3 , help='Number of train epochs.' , )
_UpperCamelCase : Tuple = parser.parse_args()
_UpperCamelCase : Optional[int] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
training_function(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
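# A hedged aside: the loop above scales the loss by gradient_accumulation_steps
# and steps the optimizer only every N micro-batches. A counter-only sketch of
# that schedule (it mirrors the `step % N == 0` condition used above, which
# also fires on the very first micro-batch):
def count_optimizer_steps(num_batches, accumulation_steps):
    optimizer_steps = 0
    for step in range(num_batches):
        # backward() would run here on loss / accumulation_steps
        if step % accumulation_steps == 0:
            optimizer_steps += 1  # optimizer.step(); lr_scheduler.step(); zero_grad()
    return optimizer_steps

assert count_optimizer_steps(num_batches=8, accumulation_steps=2) == 4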
| 702
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , __snake_case=32):
set_seed(0)
_UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3)
_UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
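# A hedged aside: both loops above train with the same denoising objective:
# mix scheduler noise into a clean sample at a sampled timestep, predict that
# noise, and regress it with MSE. A numpy sketch of one such step (the linear
# noising rule below is a simplification of the real schedulers):
import numpy as np

def denoising_step_loss(clean, noise, alpha):
    noisy = np.sqrt(alpha) * clean + np.sqrt(1.0 - alpha) * noise
    predicted_noise = noisy - np.sqrt(alpha) * clean  # an oracle "model", for illustration
    return float(np.mean((predicted_noise - np.sqrt(1.0 - alpha) * noise) ** 2))

rng = np.random.default_rng(0)
loss = denoising_step_loss(rng.normal(size=(4, 3)), rng.normal(size=(4, 3)), alpha=0.9)
assert loss < 1e-12  # the oracle recovers the injected noise exactly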
| 648
| 0
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowercase ( nn.Module ):
"""simple docstring"""
a__ = 4_2
a__ = jnp.floataa
def A__ ( self):
_UpperCamelCase : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __snake_case):
_UpperCamelCase : str = hidden_states.shape
_UpperCamelCase : Union[str, Any] = jax.image.resize(
__snake_case , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
_UpperCamelCase : Dict = self.conv(__snake_case)
return hidden_states
class lowercase ( nn.Module ):
"""simple docstring"""
a__ = 4_2
a__ = jnp.floataa
def A__ ( self):
_UpperCamelCase : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __snake_case):
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_UpperCamelCase : Any = self.conv(__snake_case)
return hidden_states
class lowercase ( nn.Module ):
"""simple docstring"""
a__ = 4_2
a__ = None
a__ = 0.0
a__ = None
a__ = jnp.floataa
def A__ ( self):
_UpperCamelCase : Optional[int] = self.in_channels if self.out_channels is None else self.out_channels
_UpperCamelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5)
_UpperCamelCase : Tuple = nn.Conv(
__snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCamelCase : int = nn.Dense(__snake_case , dtype=self.dtype)
_UpperCamelCase : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5)
_UpperCamelCase : Tuple = nn.Dropout(self.dropout_prob)
_UpperCamelCase : List[str] = nn.Conv(
__snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCamelCase : str = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_UpperCamelCase : str = None
if use_nin_shortcut:
_UpperCamelCase : Optional[int] = nn.Conv(
__snake_case , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self , __snake_case , __snake_case , __snake_case=True):
_UpperCamelCase : str = hidden_states
_UpperCamelCase : Optional[Any] = self.norma(__snake_case)
_UpperCamelCase : List[Any] = nn.swish(__snake_case)
_UpperCamelCase : List[Any] = self.conva(__snake_case)
_UpperCamelCase : List[str] = self.time_emb_proj(nn.swish(__snake_case))
_UpperCamelCase : Dict = jnp.expand_dims(jnp.expand_dims(__snake_case , 1) , 1)
_UpperCamelCase : str = hidden_states + temb
_UpperCamelCase : Optional[int] = self.norma(__snake_case)
_UpperCamelCase : List[Any] = nn.swish(__snake_case)
_UpperCamelCase : Optional[Any] = self.dropout(__snake_case , __snake_case)
_UpperCamelCase : Tuple = self.conva(__snake_case)
if self.conv_shortcut is not None:
_UpperCamelCase : Dict = self.conv_shortcut(__snake_case)
return hidden_states + residual
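# A hedged aside: the residual block above injects the time embedding by
# expanding it to (batch, 1, 1, channels) and adding it to the NHWC feature
# map. A small numpy sketch of that broadcast (all shapes are illustrative):
import numpy as np

hidden = np.zeros((2, 8, 8, 16))   # NHWC feature map
temb = np.ones((2, 16))            # one embedding vector per sample
temb = temb[:, None, None, :]      # expand to (2, 1, 1, 16)
out = hidden + temb                # broadcasts over height and width
assert out.shape == (2, 8, 8, 16) and out[0, 3, 5, 7] == 1.0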
| 703
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowerCAmelCase__ = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
lowerCAmelCase__ = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
_UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
_UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
_UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
_UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
_UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight''']
_UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias''']
_UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 )
_UpperCamelCase : Optional[Any] = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
_UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight']
_UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias']
_UpperCamelCase : Dict = checkpoint['time_embed.2.weight']
_UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_UpperCamelCase : List[str] = checkpoint['label_emb.weight']
_UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight']
_UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_UpperCamelCase : Optional[int] = unet_config['down_block_types']
_UpperCamelCase : Optional[Any] = unet_config['layers_per_block']
_UpperCamelCase : Dict = unet_config['attention_head_dim']
_UpperCamelCase : List[str] = unet_config['block_out_channels']
_UpperCamelCase : str = 1
_UpperCamelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = channels_list[i]
_UpperCamelCase : str = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}'''
_UpperCamelCase : str = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False
_UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1'''
_UpperCamelCase : Dict = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0'''
_UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0'''
_UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
_UpperCamelCase : Tuple = current_channels
# hardcoded the mid-block for now
_UpperCamelCase : Any = 'mid_block.resnets.0'
_UpperCamelCase : Optional[Any] = 'middle_block.0'
_UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = 'mid_block.attentions.0'
_UpperCamelCase : Tuple = 'middle_block.1'
_UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = 'mid_block.resnets.1'
_UpperCamelCase : str = 'middle_block.2'
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[int] = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0'''
_UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
_UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}'''
_UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1'''
_UpperCamelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
_UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0'''
_UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2'''
_UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : List[Any] = checkpoint['out.0.weight']
_UpperCamelCase : str = checkpoint['out.0.bias']
_UpperCamelCase : int = checkpoint['out.2.weight']
_UpperCamelCase : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = strabool(args.class_cond)
lowerCAmelCase__ = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCAmelCase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
lowerCAmelCase__ = None
lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCAmelCase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCAmelCase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
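# A hedged aside: the converter above walks an old checkpoint and rewrites its
# keys into the diffusers layout. A minimal sketch of one prefix-renaming step
# on a plain dict (both prefixes here are illustrative, not the real layouts):
def rename_prefix(state, old_prefix, new_prefix):
    renamed = {}
    for key, value in state.items():
        if key.startswith(old_prefix + '.'):
            key = new_prefix + key[len(old_prefix):]
        renamed[key] = value
    return renamed

old_state = {'input_blocks.0.0.weight': 1, 'out.0.bias': 2}
new_state = rename_prefix(old_state, 'input_blocks.0.0', 'conv_in')
assert 'conv_in.weight' in new_state and new_state['out.0.bias'] == 2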
| 648
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 704
|
def lowerCamelCase_ ( UpperCAmelCase_ : list ) -> list:
'''simple docstring'''
if len(UpperCAmelCase_ ) <= 1:
return [tuple(UpperCAmelCase_ )]
_UpperCamelCase : List[Any] = []
def generate(UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
_UpperCamelCase : Optional[int] = [0] * n
res.append(tuple(UpperCAmelCase_ ) )
_UpperCamelCase : List[Any] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[0]
else:
_UpperCamelCase , _UpperCamelCase : List[str] = arr[i], arr[c[i]]
res.append(tuple(UpperCAmelCase_ ) )
c[i] += 1
_UpperCamelCase : Tuple = 0
else:
_UpperCamelCase : Tuple = 0
i += 1
generate(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return res
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
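# A hedged aside: the function above is the iterative form of Heap's
# algorithm. A compact recursive form of the same algorithm, cross-checked
# against the standard library (function names here are illustrative):
from itertools import permutations

def heap_permutations(items):
    # canonical recursive form of Heap's algorithm
    arr = list(items)
    out = []

    def generate(k):
        if k <= 1:
            out.append(tuple(arr))
            return
        for i in range(k - 1):
            generate(k - 1)
            if k % 2 == 0:
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
        generate(k - 1)

    generate(len(arr))
    return out

assert sorted(heap_permutations([1, 2, 3])) == sorted(permutations([1, 2, 3]))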
| 648
| 0
|
def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0 , UpperCAmelCase_ : int = 1_0_0_0 , UpperCAmelCase_ : bool = True ) -> int:
'''simple docstring'''
assert (
isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_val must be < max_val)' )
return min_val if option else max_val
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> None:
'''simple docstring'''
assert (
isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument values for lower and higher must satisfy (lower < higher)' )
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must lie strictly between the lower and higher values' )
def answer(UpperCAmelCase_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...' )
_UpperCamelCase : Optional[int] = lower
_UpperCamelCase : int = higher
_UpperCamelCase : List[str] = []
while True:
_UpperCamelCase : int = get_avg(UpperCAmelCase_ , UpperCAmelCase_ )
last_numbers.append(UpperCAmelCase_ )
if answer(UpperCAmelCase_ ) == "low":
_UpperCamelCase : List[str] = number
elif answer(UpperCAmelCase_ ) == "high":
_UpperCamelCase : int = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
_UpperCamelCase : Dict = int(input('Enter lower value : ' ).strip() )
_UpperCamelCase : List[Any] = int(input('Enter high value : ' ).strip() )
_UpperCamelCase : Dict = int(input('Enter value to guess : ' ).strip() )
guess_the_number(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
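# A hedged aside: the guessing loop above is a binary search over the open
# interval (lower, higher). The same probe-and-narrow logic without the I/O
# (the target must lie strictly between the bounds, as the function enforces):
def count_probes(lower, higher, target):
    probes = 0
    while True:
        probes += 1
        mid = (lower + higher) // 2
        if mid < target:
            lower = mid
        elif mid > target:
            higher = mid
        else:
            return probes

assert count_probes(0, 10_00, 17) <= 11  # roughly log2 of the range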
| 705
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCamelCase : List[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if k.startswith('encoder' ):
_UpperCamelCase : Optional[Any] = k.replace('.attn' , '.self_attn' )
_UpperCamelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
_UpperCamelCase : Any = k.replace('norm1' , 'self_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' )
_UpperCamelCase : Tuple = k.replace('norm3' , 'final_layer_norm' )
return k
def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
_UpperCamelCase : Optional[int] = sd.pop(UpperCAmelCase_ )
_UpperCamelCase : str = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
_UpperCamelCase : Tuple = v
lowerCAmelCase__ = ["""START"""]
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
_UpperCamelCase : int = model['model']
_UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ )
_UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = m.model.state_dict().keys()
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCamelCase : int = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase_ )
m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
m.half()
m.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
lowerCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
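# A hedged aside: the key renamer above applies an ordered list of (old, new)
# substring rewrites; order matters because later patterns can touch earlier
# outputs. A compact sketch of that cascade on one key (the pattern list
# below is a trimmed illustration):
DEMO_PATTERNS = [('attention', 'attn'), ('q_lin', 'q_proj'), ('ffn.lin', 'fc')]

def demo_rename_key(key):
    for old, new in DEMO_PATTERNS:
        key = key.replace(old, new)
    return key

assert demo_rename_key('encoder.attention.q_lin.weight') == 'encoder.attn.q_proj.weight'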
| 648
| 0
|
import string
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> None:
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
_UpperCamelCase : List[Any] = ''
for symbol in message:
if symbol in string.ascii_uppercase:
_UpperCamelCase : Optional[int] = string.ascii_uppercase.find(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = num - key
if num < 0:
_UpperCamelCase : List[str] = num + len(string.ascii_uppercase )
_UpperCamelCase : Union[str, Any] = translated + string.ascii_uppercase[num]
else:
_UpperCamelCase : Optional[Any] = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
_UpperCamelCase : Any = input('Encrypted message: ' )
_UpperCamelCase : Any = message.upper()
decrypt(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
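# A hedged aside: the brute-force decrypter above shifts every letter by each
# candidate key. A single shift can be written more compactly with
# str.maketrans (the helper name is illustrative):
import string

def caesar_decrypt(message, key):
    upper = string.ascii_uppercase
    table = str.maketrans(upper, upper[-key % 26:] + upper[:-key % 26])
    return message.translate(table)

assert caesar_decrypt('KHOOR', 3) == 'HELLO'  # undoing a shift-by-3 cipher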
| 706
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowercase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , __snake_case):
super().__init__()
_UpperCamelCase : List[Any] = tokenizer
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case)
_UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case)
def A__ ( self , __snake_case):
_UpperCamelCase : Any = self.tokenizer(__snake_case)
_UpperCamelCase : Dict = self.bert(**__snake_case)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self):
super().setUp()
_UpperCamelCase : Optional[Any] = [
BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_UpperCamelCase : Optional[Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1]))
def A__ ( self):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest')
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences)
_UpperCamelCase : Optional[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Tuple = tf.function(__snake_case)
for test_inputs in (self.test_sentences, self.paired_sentences):
_UpperCamelCase : Optional[int] = tf.constant(__snake_case)
_UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case)
_UpperCamelCase : Tuple = tf_tokenizer(__snake_case)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def A__ ( self):
for tf_tokenizer in self.tf_tokenizers:
_UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case)
_UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences)
_UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_UpperCamelCase : int = Path(__snake_case) / 'saved.model'
model.save(__snake_case)
_UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case)
_UpperCamelCase : int = loaded_model(__snake_case)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
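# A hedged aside: several tests above compare eager output against the same
# computation compiled with tf.function. A toy sketch of that eager-vs-graph
# check (assumes TensorFlow is installed; the function itself is arbitrary):
import tensorflow as tf

def double(x):
    return x * 2

compiled_double = tf.function(double)
x = tf.constant([1, 2, 3])
assert bool(tf.reduce_all(double(x) == compiled_double(x)))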
| 648
| 0
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class __A ( _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = DebertaVaTokenizer
a__ = DebertaVaTokenizerFast
a__ = True
a__ = True
def A__ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase : List[str] = DebertaVaTokenizer(__snake_case , unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self , __snake_case):
_UpperCamelCase : List[Any] = 'this is a test'
_UpperCamelCase : int = 'this is a test'
return input_text, output_text
def A__ ( self):
_UpperCamelCase : List[str] = '<pad>'
_UpperCamelCase : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case)
def A__ ( self):
_UpperCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<pad>')
self.assertEqual(vocab_keys[1] , '<unk>')
self.assertEqual(vocab_keys[-1] , '[PAD]')
self.assertEqual(len(__snake_case) , 3_00_01)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00)
def A__ ( self):
# fmt: off
_UpperCamelCase : Union[str, Any] = ' \tHeLLo!how \n Are yoU? '
_UpperCamelCase : Tuple = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
_UpperCamelCase : str = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case)
_UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : int = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case)
_UpperCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def A__ ( self):
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def A__ ( self):
pass
def A__ ( self):
# fmt: off
_UpperCamelCase : List[str] = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : str = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_UpperCamelCase : int = DebertaVaTokenizer(__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Optional[int] = DebertaVaTokenizerFast(__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[Any] = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : List[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_UpperCamelCase : Optional[Any] = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Optional[Any] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
# fmt: off
_UpperCamelCase : Tuple = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : List[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_UpperCamelCase : Any = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Optional[Any] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
# fmt: off
_UpperCamelCase : Tuple = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : int = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
        tokenizer = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
        rust_tokenizer = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[Any] = ' \tHeLLo!how \n Are yoU? '
_UpperCamelCase : Dict = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
        tokenizer = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
        rust_tokenizer = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case)
_UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
_UpperCamelCase : int = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
_UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case))
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : List[str] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
_UpperCamelCase : Optional[int] = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
        rust_tokenizer = self.get_rust_tokenizer()
_UpperCamelCase : List[str] = tokenizer.encode(__snake_case)
_UpperCamelCase : List[str] = rust_tokenizer.encode(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
_UpperCamelCase : Dict = 'This is a test'
_UpperCamelCase : Tuple = [13, 1, 43_98, 25, 21, 12_89]
_UpperCamelCase : Any = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
_UpperCamelCase : Any = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
        tokenizer = DebertaVaTokenizer(__snake_case , keep_accents=__snake_case)
        rust_tokenizer = DebertaVaTokenizerFast(__snake_case , keep_accents=__snake_case)
_UpperCamelCase : Union[str, Any] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : List[Any] = tokenizer.tokenize(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : int = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Dict = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Dict = rust_tokenizer.tokenize(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
# fmt: off
_UpperCamelCase : Optional[Any] = 'I was born in 92000, and this is falsé.'
_UpperCamelCase : List[str] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
_UpperCamelCase : List[str] = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
_UpperCamelCase : Any = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_UpperCamelCase : Any = tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : str = tokenizer.tokenize(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : Any = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : int = rust_tokenizer.tokenize(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
_UpperCamelCase : int = rust_tokenizer.convert_ids_to_tokens(__snake_case)
self.assertListEqual(__snake_case , __snake_case)
def A__ ( self):
        tokenizer = DebertaVaTokenizer(__snake_case)
        text = tokenizer.encode('sequence builders')
        text_a = tokenizer.encode('multi-sequence build')
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def A__ ( self):
# fmt: off
_UpperCamelCase : Optional[Any] = {'input_ids': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
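# Illustrative usage sketch (not part of the original test file): the flags
# exercised above correspond to the public DebertaV2Tokenizer API in
# `transformers`; the checkpoint name matches the integration test above.
#
# from transformers import DebertaV2Tokenizer
#
# tokenizer = DebertaV2Tokenizer.from_pretrained(
#     'microsoft/deberta-v2-xlarge', do_lower_case=True, split_by_punct=True
# )
# print(tokenizer.tokenize('I was born in 92000, and this is falsé.'))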
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
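# A minimal, self-contained sketch of the lazy-import idea used above
# (illustrative only): attributes resolve on first access via PEP 562's
# module-level __getattr__. transformers' _LazyModule is more elaborate, but
# the underlying mechanism is the same.
#
# import importlib
#
# _LAZY_ATTRS = {"CanineConfig": "configuration_canine", "CanineTokenizer": "tokenization_canine"}
#
# def __getattr__(name):
#     if name in _LAZY_ATTRS:
#         module = importlib.import_module(f".{_LAZY_ATTRS[name]}", __name__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")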
import math


def main() -> None:
    message = input('Enter message: ')
    key = int(input(f'Enter key [2-{len(message) - 1}]: '))
    mode = input('Encryption/Decryption [e/d]: ')

    if mode.lower().startswith('e'):
        text = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f'Output:\n{text + "|"}')


def encrypt_message(key: int, message: str) -> str:
    """Columnar transposition: read every key-th character into a column."""
    cipher_text = [''] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return ''.join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert encrypt_message by refilling the transposition grid row by row."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [''] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return ''.join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
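# Round-trip sanity check for the two functions above (illustrative addition,
# not part of the original module): decryption must invert encryption for any
# key in [2, len(message) - 1].
def _round_trip_demo() -> None:
    message = "Common sense is not so common."
    for key in range(2, len(message) - 1):
        assert decrypt_message(key, encrypt_message(key, message)) == message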
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='Bit does not output attentions')
    def test_attention_outputs(self):
pass
@unittest.skip(reason='Bit does not use inputs_embeds')
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='Bit does not support input and output embeddings')
    def test_model_common_attributes(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason='Bit does not use feedforward chunking')
    def test_feed_forward_chunking(self):
pass
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
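# Hypothetical usage sketch for the backbone path exercised above (assumes the
# public BitConfig/BitBackbone API in `transformers`, mirroring
# create_and_check_backbone):
#
# from transformers import BitConfig, BitBackbone
#
# config = BitConfig(out_features=["stage2", "stage3"])
# backbone = BitBackbone(config)  # randomly initialised
# # backbone(pixel_values).feature_maps yields one feature map per requested stage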
"""Download images from a Google image search query to a local folder."""
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and save up to `max_images` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch snowflake construction `steps` times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every line segment by four shorter segments with a 60 degree spike."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of the graph
    axes = plt.gca()
    axes.set_aspect('equal')

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
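# Structural check (illustrative addition, not part of the original module):
# each iteration replaces every segment with four, so after n steps the
# 3-segment triangle yields 3 * 4**n segments, i.e. 3 * 4**n + 1 points.
def _vector_count_demo() -> None:
    for n in range(4):
        assert len(iterate(INITIAL_VECTORS, n)) == 3 * 4**n + 1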
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series '1', '1/2', ..., '1/n' for the given n."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
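# Example (illustrative): the terms come back as strings, with the first term
# rendered as '1' rather than '1/1'.
#
# >>> harmonic_series('5')
# ['1', '1/2', '1/3', '1/4', '1/5']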
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
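# Usage sketch for the custom doctest flag registered above (illustrative):
# a doctest can opt out of output comparison entirely.
#
# def noisy():
#     """
#     >>> noisy()  # doctest: +IGNORE_RESULT
#     'any output is accepted here'
#     """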
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""MobileNetV2FeatureExtractor"""]
lowerCAmelCase__ = ["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """Make the largest cached jump towards term n, writing the term's digits
    into a_i in place; jumps are cached per (digitsum(b), c) where the current
    term is viewed as b * 10^k + c."""
    # ds_b -> digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Advance the sequence term by term, updating the digit list in place."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` into the digit list, starting at digit position k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return term n of the sequence a(1) = 1, a(i) = a(i-1) + digitsum(a(i-1))."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f'{solution() = }')
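# Naive cross-check for small n (illustrative addition, not part of the
# original solution): each term adds the digit sum of the previous term,
# which is exactly what the jump-caching machinery above accelerates.
def _naive_a_n(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a


# e.g. _naive_a_n(6) == 23, matching the sequence 1, 2, 4, 8, 16, 23, ...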
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache built on a deque plus a reference set."""

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """A falsy n means unbounded capacity; negative n is rejected."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key x, evicting the least recently used key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cache contents from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
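# For comparison (illustrative addition, not part of the original file): the
# same "refer" semantics built on collections.OrderedDict, which makes the
# recency update O(1) instead of the O(n) deque.remove used above.
from collections import OrderedDict


class OrderedDictLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key, last=False)  # most recent at the front
        else:
            if len(self.store) == self.capacity:
                self.store.popitem(last=True)  # drop the least recently used
            self.store[key] = None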
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
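# Minimal usage sketch (illustrative): the defaults above match transformers'
# ViTMAEConfig, so a randomly initialised MAE encoder can be built from it.
#
# from transformers import ViTMAEConfig, ViTMAEModel
#
# config = ViTMAEConfig(mask_ratio=0.75, norm_pix_loss=True)
# model = ViTMAEModel(config)  # randomly initialised weights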
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to travel on every day in `days`, given 1-, 7- and 30-day
    pass prices in `costs` (days are day-of-year numbers in [1, 365])."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')

    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
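# Worked example (illustrative addition): with travel days [1, 4, 6, 7, 8, 20]
# and pass costs [2, 7, 15] (1-, 7- and 30-day), a 7-day pass on day 1 plus
# single-day tickets on days 8 and 20 is optimal: 7 + 2 + 2 = 11.
def _mincost_demo() -> None:
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11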
"""Find the first of n consecutive integers that each have exactly n distinct
prime factors."""
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True when every element of the list is equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first group of n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first qualifying run of length n."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
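# Small worked examples (illustrative addition): 14, 15 is the first pair of
# consecutive integers with two distinct prime factors each, and
# 644, 645, 646 the first such triple.
def _consecutive_factor_demo() -> None:
    assert unique_prime_factors(644) == {2, 7, 23}
    assert solution(2) == 14
    assert solution(3) == 644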
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
def A__ ( self):
_UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case)
_UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case)
model.to(__snake_case)
model.half()
_UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
with torch.no_grad():
_UpperCamelCase : str = model(__snake_case)[0]
_UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24))
self.assertEqual(output.shape , __snake_case)
_UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
_UpperCamelCase : Optional[Any] = output[0, ii, jj]
_UpperCamelCase : Dict = expected[3 * ii + jj]
_UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case)
self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
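# An equivalent vectorized form of the element-wise tolerance loop above (a
# sketch, not part of the original test; written with the de-obfuscated names
# `output` and `expected`, and the 1e-4 tolerance bound to `lowerCAmelCase__`):
#
#     expected_block = torch.tensor(expected).to(output.device).reshape(3, 3)
#     assert torch.allclose(output[0, :3, :3].float(), expected_block.float(), rtol=1e-4, atol=1e-4)
#
# torch.allclose collapses the nested math.isclose checks into a single call.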
lowerCAmelCase__ = range(2, 2_0 + 1)
lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) )
_UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) )
_UpperCamelCase : Dict = 0, 0
_UpperCamelCase : Optional[int] = n - i
_UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ )
if sub_memo is not None:
_UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ )
if jumps is not None and len(UpperCAmelCase_ ) > 0:
# find and make the largest jump without going over
_UpperCamelCase : str = -1
for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_UpperCamelCase : Optional[Any] = _k
break
if max_jump >= 0:
_UpperCamelCase : Optional[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
_UpperCamelCase : Tuple = diff + c
for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ):
_UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
if new_c > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
_UpperCamelCase : Union[str, Any] = []
else:
_UpperCamelCase : List[Any] = {c: []}
_UpperCamelCase : Optional[int] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ )
diff += _diff
dn += terms_jumped
_UpperCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_UpperCamelCase : Union[str, Any] = 0
while j < len(UpperCAmelCase_ ):
if jumps[j][1] > dn:
break
j += 1
    # cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) )
return (diff, dn)
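# A note on next_term() above (inferred from the code, added as documentation):
# jumps are memoized per (digitsum(b), c), where b is the little-endian digit
# array from index k upward and c is the value of the k low-order digits; the
# return value (diff, dn) gives the amount added to the number and the count of
# sequence terms skipped, so callers can fast-forward through repeated states.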
def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
if i >= n:
return 0, i
if k > len(UpperCAmelCase_ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_UpperCamelCase : Any = i
_UpperCamelCase : Any = 0, 0, 0
for j in range(len(UpperCAmelCase_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_UpperCamelCase : Union[str, Any] = ds_c + ds_b
diff += addend
_UpperCamelCase : Union[str, Any] = 0
for j in range(UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = a_i[j] + addend
_UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return diff, i - start_i
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict:
'''simple docstring'''
for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ):
_UpperCamelCase : List[str] = digits[j] + addend
if s >= 1_0:
_UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 )
_UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient
else:
_UpperCamelCase : Dict = s
_UpperCamelCase : Optional[Any] = addend // 1_0
if addend == 0:
break
while addend > 0:
_UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 )
digits.append(UpperCAmelCase_ )
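# Worked trace of add() above with digits=[9, 9] (little-endian 99), k=0, addend=3:
#   j=0: s = 9 + 3 = 12 -> digits[0] = 2, carry 1 into addend
#   j=1: s = 9 + 1 = 10 -> digits[1] = 0, carry 1 into addend
#   leftover addend 1 -> append 1, giving digits=[2, 0, 1], i.e. 99 + 3 = 102.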
def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = [1]
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : int = 0
while True:
_UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ )
dn += terms_jumped
if dn == n - i:
break
_UpperCamelCase : str = 0
for j in range(len(UpperCAmelCase_ ) ):
a_n += digits[j] * 1_0**j
return a_n
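# A brute-force cross-check for small n (a sketch, not part of the original
# solution; `_brute_force` is a hypothetical helper name): the sequence is
# a(1) = 1, a(n+1) = a(n) + digitsum(a(n)), so the memoized solver above should
# agree with direct iteration on small inputs.
def _brute_force(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a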
if __name__ == "__main__":
print(f'{solution() = }')
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """▁"""
lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowerCAmelCase__ = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
_UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token
_UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__snake_case))
_UpperCamelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset
_UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
_UpperCamelCase : List[Any] = self.__dict__.copy()
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __snake_case):
_UpperCamelCase : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A__ ( self , __snake_case , __snake_case = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __snake_case , __snake_case = None , __snake_case = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case)
if token_ids_a is None:
return [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1]
def A__ ( self , __snake_case , __snake_case = None):
_UpperCamelCase : Optional[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def A__ ( self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def A__ ( self):
_UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A__ ( self , __snake_case):
return self.sp_model.encode(__snake_case , out_type=__snake_case)
def A__ ( self , __snake_case):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase : str = self.sp_model.PieceToId(__snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self , __snake_case):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def A__ ( self , __snake_case):
_UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(__snake_case , ' ').strip()
return out_string
def A__ ( self , __snake_case , __snake_case = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
_UpperCamelCase : str = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __snake_case)
elif not os.path.isfile(self.vocab_file):
with open(__snake_case , 'wb') as fi:
_UpperCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(__snake_case)
return (out_vocab_file,)
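# Usage sketch (an assumption, not part of this file): given a local
# `sentencepiece.bpe.model`, the tokenizer class above (obfuscated here as
# `lowercase`) is constructed as `lowercase("sentencepiece.bpe.model")`.
# Special tokens follow the XLM-R scheme: a single sequence becomes
# `<s> X </s>` and a pair becomes `<s> A </s></s> B </s>`; the fairseq offset
# of 1 maps spm piece id p to id p + 1, with ids 0-3 reserved for
# <s>, <pad>, </s> and <unk> as in the alignment table above.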
import math
from datetime import datetime, timedelta
def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> datetime:
'''simple docstring'''
_UpperCamelCase : Tuple = year % 1_9
_UpperCamelCase : int = year % 4
_UpperCamelCase : List[Any] = year % 7
_UpperCamelCase : Optional[int] = math.floor(year / 1_0_0 )
_UpperCamelCase : Optional[Any] = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
_UpperCamelCase : Any = leap_day_inhibits / 4
_UpperCamelCase : List[str] = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
_UpperCamelCase : Optional[int] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_UpperCamelCase : Optional[int] = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
_UpperCamelCase : Optional[int] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(UpperCAmelCase_ , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(UpperCAmelCase_ , 4 , 1_8 )
else:
return datetime(UpperCAmelCase_ , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCAmelCase__ = """will be""" if year > datetime.now().year else """was"""
print(f'Easter in {year} {tense} {gauss_easter(year)}')
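# Sanity check against known Gregorian Easter dates (stated for hand-checking
# the output above): 1994 -> April 3, 2000 -> April 23, 2010 -> April 4,
# 2021 -> April 4, 2023 -> April 9.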
from ...processing_utils import ProcessorMixin
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = ["image_processor", "feature_extractor"]
a__ = "TvltImageProcessor"
a__ = "TvltFeatureExtractor"
def __init__( self , __snake_case , __snake_case):
super().__init__(image_processor=__snake_case , feature_extractor=__snake_case)
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : Dict = feature_extractor
def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.')
_UpperCamelCase : Union[str, Any] = None
if images is not None:
_UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case)
if images_mixed is not None:
_UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case)
if audio is not None:
_UpperCamelCase : Tuple = self.feature_extractor(
__snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case)
_UpperCamelCase : Tuple = {}
if audio is not None:
output_dict.update(__snake_case)
if images is not None:
output_dict.update(__snake_case)
if images_mixed_dict is not None:
output_dict.update(__snake_case)
return output_dict
@property
def A__ ( self):
_UpperCamelCase : List[Any] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
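# Usage sketch (hypothetical, based on the __call__ signature above): the
# processor wraps the two sub-processors and merges their outputs into one
# dict, e.g. `processor = lowercase(image_processor, feature_extractor)`, then
# `batch = processor(images=video_frames, audio=waveform, sampling_rate=44_100)`.
# Audio keys are added first, then image keys, then mixed-image keys, mirroring
# the update order in __call__.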
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ) -> str:
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Dict = F'''Expected string as input, found {type(UpperCAmelCase_ )}'''
raise ValueError(UpperCAmelCase_ )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Tuple = F'''Expected boolean as use_pascal parameter, found {type(UpperCAmelCase_ )}'''
raise ValueError(UpperCAmelCase_ )
_UpperCamelCase : Any = input_str.split('_' )
_UpperCamelCase : Dict = 0 if use_pascal else 1
_UpperCamelCase : Union[str, Any] = words[start_index:]
_UpperCamelCase : str = [word[0].upper() + word[1:] for word in words_to_capitalize]
_UpperCamelCase : Optional[int] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
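# Worked examples for the converter above (checked against its split/capitalize
# logic; the public name is obfuscated as `lowerCamelCase_` in this file):
#   "some_random_string"                  -> "someRandomString"   (camel case)
#   "some_random_string", use_pascal=True -> "SomeRandomString"   (pascal case)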
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
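# Instantiation sketch (keyword names inferred from the attribute assignments
# above; the constructor parameters themselves are obfuscated in this file):
# `config = lowercase()` yields vocab_size=50277 and context_length=1024 by
# default, with attention_hidden_size falling back to hidden_size and
# intermediate_size to 4 * hidden_size when left as None.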