| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1E-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.", FutureWarning, )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        """simple docstring"""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """simple docstring"""
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 272 | '''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
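    # Hand-checked sanity sketch (editorial addition, not part of the source row):
    # per the Project Euler 86 statement, the cumulative cuboid count first
    # exceeds 2000 at max_cuboid_size = 100, so this cheap check should pass.
    assert solution(2000) == 100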
| 272 | 1 |
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    '''simple docstring'''
    def __init__(self, task_performed, total):
        """simple docstring"""
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no):
        """simple docstring"""
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        """simple docstring"""
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
__lowercase = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__lowercase = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
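    # Hand-checked expectation (editorial addition, not part of the source row):
    # person 2 can only take task 3 or 4; enumerating the distinct remaining
    # choices for persons 0 and 1 in each case gives 5 + 5 = 10 ways, so the
    # print above should output 10 and this check on a fresh instance should pass.
    assert AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(task_performed) == 10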
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 272 | 1 |
'''simple docstring'''
def longest_distance(graph):
    '''simple docstring'''
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
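# Hand-checked expectation (editorial addition, not part of the source row): the
# longest chain in the sample DAG above is 0 -> 2 -> 5 -> 6 -> 7, which touches
# five vertices, so longest_distance(graph) prints 5.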
| 272 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean"""))
self.assertTrue(hasattr(__lowerCAmelCase , """image_std"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """size"""))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 272 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq
    inputs = ["image"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image"):
        """simple docstring"""
        return self.pre_processor(images=image, return_tensors="pt")
    def forward(self, inputs):
        """simple docstring"""
        return self.model.generate(**inputs)
    def decode(self, outputs):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 272 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    '''simple docstring'''
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule( nn.Module ):
'''simple docstring'''
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config)
lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype)
lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim))
lowerCAmelCase = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim))
lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,))
lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,))
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1]
lowerCAmelCase = self.visual_projection(__lowerCAmelCase)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds)
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowerCAmelCase = 0.0
lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase)
# Use a lower threshold if an image has any special care concept
lowerCAmelCase = is_special_care * 0.01
lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker( FlaxPreTrainedModel ):
    '''simple docstring'''
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
if input_shape is None:
lowerCAmelCase = (1, 224, 224, 3)
lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase)
super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase)
lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng}
lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""]
return random_params
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1))
return self.module.apply(
{"""params""": params or self.params} , jnp.array(__lowerCAmelCase , dtype=jnp.floataa) , rngs={} , )
| 272 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__( unittest.TestCase ):
'''simple docstring'''
@property
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
        model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.dummy_uncond_unet
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """google/ddpm-cifar10-32"""
lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 272 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def a_ ( self):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(__lowerCAmelCase))
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
# Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 1 |
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
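    # Hand-checked examples (editorial addition, not part of the source row):
    # the Z-array convention above leaves index 0 at 0.
    assert z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1]
    assert find_pattern("abr", "abrakadabra") == 2  # "abr" occurs at offsets 0 and 7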
| 272 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType( enum.Enum ):
    '''simple docstring'''
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline( Pipeline ):
'''simple docstring'''
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase = None
if self.model.config.prefix is not None:
lowerCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params)
lowerCAmelCase = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase = {**self._forward_params, **forward_params}
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = {}
if prefix is not None:
lowerCAmelCase = prefix
if prefix:
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
""" [None, 'hole']""")
lowerCAmelCase = handle_long_generation
preprocess_params.update(__lowerCAmelCase)
lowerCAmelCase = generate_kwargs
lowerCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
if len(__lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
lowerCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase)
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
| 272 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
def a_ ( self):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = """<unk> UNwanted , running"""
lowerCAmelCase = """<unk> unwanted, running"""
return input_text, output_text
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""<unk> UNwanted , running""")
self.assertListEqual(__lowerCAmelCase , ["""<unk>""", """unwanted""", """,""", """running"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [0, 4, 8, 7])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TransfoXLTokenizer(lower_case=__lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TransfoXLTokenizer(lower_case=__lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TransfoXLTokenizer(lower_case=__lowerCAmelCase)
lowerCAmelCase = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
lowerCAmelCase = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(tokenizer.convert_tokens_to_string(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = len(__lowerCAmelCase)
tokenizer.add_tokens(["""new1""", """new2"""])
tokenizer.move_added_token("""new1""" , 1)
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowerCAmelCase) , original_len + 2)
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""") , [1])
self.assertEqual(tokenizer.decode([1]) , """new1""")
| 272 | '''simple docstring'''
def z_function(input_str: str) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | 1 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    '''simple docstring'''
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    '''simple docstring'''
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
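# Hand-checked examples (editorial addition, not part of the source row):
#     fibonacci_digits_index(3) == 12   # fibonacci(12) = 144 is the first 3-digit term
#     solution(1000) == 4782            # the well-known Project Euler 25 answer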
| 272 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.feature_extractor
lowerCAmelCase = False
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase)
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if text is not None:
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase)
if audio is not None:
lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
lowerCAmelCase = audio_inputs["""padding_mask"""]
return inputs
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase)
else:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = to_numpy(__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape
if padding_mask is None:
return list(__lowerCAmelCase)
lowerCAmelCase = to_numpy(__lowerCAmelCase)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase = seq_len - padding_mask.shape[-1]
lowerCAmelCase = 1 - self.feature_extractor.padding_value
lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase)
lowerCAmelCase = audio_values.tolist()
for i in range(__lowerCAmelCase):
lowerCAmelCase = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1)
return audio_values
| 272 | 1 |
'''simple docstring'''
class Graph:
    '''simple docstring'''
    def __init__(self):
        """simple docstring"""
        self.vertex = {}
    def print_graph(self):
        """simple docstring"""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))
    def add_edge(self, from_vertex, to_vertex):
        """simple docstring"""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs(self):
        """simple docstring"""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)
    def dfs_recursive(self, start_vertex, visited):
        """simple docstring"""
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 272 | '''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__( unittest.TestCase ):
'''simple docstring'''
@property
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
        model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.dummy_uncond_unet
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """google/ddpm-cifar10-32"""
lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 272 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="__start__" , __lowerCAmelCase="__end__" , __lowerCAmelCase="__unk__" , __lowerCAmelCase="__null__" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , **__lowerCAmelCase)
with open(__lowerCAmelCase , encoding="""utf-8""") as vocab_handle:
lowerCAmelCase = json.load(__lowerCAmelCase)
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(__lowerCAmelCase , encoding="""utf-8""") as merges_handle:
lowerCAmelCase = merges_handle.read().split("""\n""")[1:-1]
lowerCAmelCase = [tuple(merge.split()) for merge in merges]
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = {}
@property
def a_ ( self):
"""simple docstring"""
return len(self.encoder)
def a_ ( self):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCAmelCase = re.sub("""([.,!?()])""" , r""" \1""" , __lowerCAmelCase)
lowerCAmelCase = re.sub("""(')""" , r""" \1 """ , __lowerCAmelCase)
lowerCAmelCase = re.sub(r"""\s{2,}""" , """ """ , __lowerCAmelCase)
if "\n" in token:
lowerCAmelCase = token.replace("""\n""" , """ __newln__""")
lowerCAmelCase = token.split(""" """)
lowerCAmelCase = []
for token in tokens:
if not len(__lowerCAmelCase):
continue
lowerCAmelCase = token.lower()
lowerCAmelCase = tuple(__lowerCAmelCase)
lowerCAmelCase = tuple(list(word[:-1]) + [word[-1] + """</w>"""])
lowerCAmelCase = get_pairs(__lowerCAmelCase)
if not pairs:
words.append(__lowerCAmelCase)
continue
while True:
lowerCAmelCase = min(__lowerCAmelCase , key=lambda __lowerCAmelCase: self.bpe_ranks.get(__lowerCAmelCase , float("""inf""")))
if bigram not in self.bpe_ranks:
break
lowerCAmelCase , lowerCAmelCase = bigram
lowerCAmelCase = []
lowerCAmelCase = 0
while i < len(__lowerCAmelCase):
try:
lowerCAmelCase = word.index(__lowerCAmelCase , __lowerCAmelCase)
new_word.extend(word[i:j])
lowerCAmelCase = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(__lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowerCAmelCase = tuple(__lowerCAmelCase)
lowerCAmelCase = new_word
if len(__lowerCAmelCase) == 1:
break
else:
lowerCAmelCase = get_pairs(__lowerCAmelCase)
lowerCAmelCase = """@@ """.join(__lowerCAmelCase)
lowerCAmelCase = word[:-4]
lowerCAmelCase = word
words.append(__lowerCAmelCase)
return " ".join(__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = re.findall(r"""\S+\n?""" , __lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(__lowerCAmelCase).split(""" """)))
return split_tokens
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = token.lower()
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token))
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return self.decoder.get(__lowerCAmelCase , self.unk_token)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = """ """.join(__lowerCAmelCase).replace("""@@ """ , """""").strip()
return out_string
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
lowerCAmelCase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
lowerCAmelCase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase) + """\n""")
lowerCAmelCase = 0
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""") as writer:
writer.write("""#version: 0.2\n""")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""")
lowerCAmelCase = token_index
writer.write(""" """.join(__lowerCAmelCase) + """\n""")
index += 1
return vocab_file, merge_file
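# Editor's sketch (not in the original file): this class corresponds to
# BlenderbotSmallTokenizer in the public `transformers` API. A typical round
# trip (requires downloading the checkpoint named in the maps above):
# from transformers import BlenderbotSmallTokenizer
# tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
# ids = tok("sam is a great name").input_ids   # subword ids produced by the BPE above
# tok.decode(ids)                              # "@@ " continuations are re-joined on decode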
| 272 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def snake_case__ ( _A: str ) -> str:
'''simple docstring'''
if not sentence:
return ""
lowerCAmelCase = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
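# Editor's worked example: the mapping built from ascii_lowercase/ascii_uppercase
# upper-cases only the first character, e.g. "hello world" -> "Hello world".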
| 272 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = ['''image_processor''', '''tokenizer''']
UpperCAmelCase_ : Any = '''CLIPImageProcessor'''
UpperCAmelCase_ : int = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowerCAmelCase , )
lowerCAmelCase = kwargs.pop("""feature_extractor""")
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(__lowerCAmelCase , __lowerCAmelCase)
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase)
if images is not None:
lowerCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase)
if text is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase) , tensor_type=__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase)
@property
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer.model_input_names
lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
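# Editor's sketch: the class above appears to correspond to transformers'
# AltCLIPProcessor (CLIP image processor + XLM-RoBERTa tokenizer). A typical
# call (checkpoint id is a placeholder):
# processor = AltCLIPProcessor.from_pretrained("<checkpoint>")
# batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
# batch then carries input_ids / attention_mask from the tokenizer plus
# pixel_values copied over from the image processor, as in __call__ above.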
| 272 | '''simple docstring'''
import os
import string
import sys
__lowercase = 1 << 8
__lowercase = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
__lowercase = KEYMAP['''up''']
__lowercase = KEYMAP['''left''']
if sys.platform == "win32":
__lowercase = []
__lowercase = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
__lowercase = ord(str(i))
def snake_case__ ( ) -> List[Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_A ) == 0:
# Read the keystroke
lowerCAmelCase = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(_A )
if ord(_A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase = cha[1]
else:
lowerCAmelCase = ch.decode(_A )
else:
lowerCAmelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase = sys.stdin.fileno()
lowerCAmelCase = termios.tcgetattr(_A )
try:
tty.setraw(_A )
lowerCAmelCase = sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def snake_case__ ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase = get_raw_chars()
if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_A ) == KEYMAP["esc"]:
lowerCAmelCase = get_raw_chars()
if ord(_A ) == KEYMAP["mod_int"]:
lowerCAmelCase = get_raw_chars()
if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
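# Editor's sketch: a minimal read loop over the two helpers above (the raw
# reader `get_raw_chars` and the arrow-key translator it feeds; `get_character`
# is the name the second helper carries in the original accelerate source).
# Works only on a real terminal:
# while True:
#     key = get_character()
#     if ord(key) == KEYMAP["interrupt"]:
#         break
#     if ord(key) == KEYMAP["up"]:
#         pass  # e.g. move a menu cursor one row up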
| 272 | 1 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase="" , __lowerCAmelCase="train"):
"""simple docstring"""
assert os.path.isdir(__lowerCAmelCase)
lowerCAmelCase = []
lowerCAmelCase = os.listdir(__lowerCAmelCase)
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase)
if not os.path.isfile(__lowerCAmelCase):
continue
self.documents.append(__lowerCAmelCase)
def __len__( self):
"""simple docstring"""
return len(self.documents)
def __getitem__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.documents[idx]
lowerCAmelCase = document_path.split("""/""")[-1]
with open(__lowerCAmelCase , encoding="""utf-8""") as source:
lowerCAmelCase = source.read()
lowerCAmelCase , lowerCAmelCase = process_story(__lowerCAmelCase)
return document_name, story_lines, summary_lines
def snake_case__ ( _A: Dict ) -> Dict:
'''simple docstring'''
lowerCAmelCase = list(filter(lambda _A : len(_A ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) )
# for some unknown reason some lines miss a period, add it
lowerCAmelCase = [_add_missing_period(_A ) for line in nonempty_lines]
# gather article lines
lowerCAmelCase = []
lowerCAmelCase = deque(_A )
while True:
try:
lowerCAmelCase = lines.popleft()
if element.startswith("""@highlight""" ):
break
story_lines.append(_A )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowerCAmelCase = list(filter(lambda _A : not t.startswith("""@highlight""" ) , _A ) )
return story_lines, summary_lines
def snake_case__ ( _A: List[str] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""]
if line.startswith("""@highlight""" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def snake_case__ ( _A: Optional[int] , _A: Tuple , _A: int ) -> Dict:
'''simple docstring'''
if len(_A ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_A )) )
return sequence
def snake_case__ ( _A: Optional[int] , _A: Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase = torch.ones_like(_A )
lowerCAmelCase = sequence == pad_token_id
lowerCAmelCase = 0
return mask
def snake_case__ ( _A: int , _A: Optional[int] , _A: Optional[int] ) -> Dict:
'''simple docstring'''
lowerCAmelCase = [tokenizer.encode(_A ) for line in story_lines]
lowerCAmelCase = [token for sentence in story_lines_token_ids for token in sentence]
lowerCAmelCase = [tokenizer.encode(_A ) for line in summary_lines]
lowerCAmelCase = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def snake_case__ ( _A: List[Any] , _A: List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase = []
for sequence in batch:
lowerCAmelCase = -1
lowerCAmelCase = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_A )
return torch.tensor(_A )
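# Editor's worked example for the segment-embedding helper directly above: with
# separator_token_id = 101, the row [101, 5, 6, 101, 7] yields [0, 0, 0, 1, 1];
# sentence_num starts at -1 and the segment id flips at every separator token.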
| 272 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ['''input_features''']
def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16000 , __lowerCAmelCase=160 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = n_fft
lowerCAmelCase = hop_length
lowerCAmelCase = chunk_length
lowerCAmelCase = chunk_length * sampling_rate
lowerCAmelCase = self.n_samples // hop_length
lowerCAmelCase = sampling_rate
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , )
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0)
lowerCAmelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0):
"""simple docstring"""
if attention_mask is not None:
lowerCAmelCase = np.array(__lowerCAmelCase , np.intaa)
lowerCAmelCase = []
for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1)):
lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
if length < normed_slice.shape[0]:
lowerCAmelCase = padding_value
normed_input_values.append(__lowerCAmelCase)
else:
lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
return normed_input_values
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
lowerCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray):
lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa)
elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
lowerCAmelCase = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech]).T]
lowerCAmelCase = BatchFeature({"""input_features""": raw_speech})
# convert into correct format for padding
lowerCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0)
# make sure list is in array format
lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
lowerCAmelCase = [self._np_extract_fbank_features(__lowerCAmelCase) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCAmelCase):
lowerCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase)
return padded_inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__)
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
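# Editor's sketch: the extractor above mirrors transformers'
# WhisperFeatureExtractor. With the defaults (80 mel bins, 16 kHz, hop 160,
# 30 s chunks) every clip is padded/truncated to 480000 samples and mapped to
# an (80, 3000) log-mel array, so for any `extractor` instance built as above:
# import numpy as np
# feats = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000,
#                   return_tensors="np")
# feats["input_features"].shape == (1, 80, 3000)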
| 272 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class a__:
'''simple docstring'''
UpperCAmelCase_ : List[str]
UpperCAmelCase_ : Optional[str] = None
# Automatically constructed
UpperCAmelCase_ : ClassVar[str] = "dict"
UpperCAmelCase_ : ClassVar[Any] = None
UpperCAmelCase_ : str = field(default='''Translation''' , init=lowerCAmelCase__ , repr=lowerCAmelCase__ )
def __call__( self):
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def a_ ( self):
"""simple docstring"""
from .features import Value
return {k: Value("""string""") for k in sorted(self.languages)}
@dataclass
class a__:
'''simple docstring'''
UpperCAmelCase_ : Optional[List] = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[str] = None
# Automatically constructed
UpperCAmelCase_ : ClassVar[str] = "dict"
UpperCAmelCase_ : ClassVar[Any] = None
UpperCAmelCase_ : str = field(default='''TranslationVariableLanguages''' , init=lowerCAmelCase__ , repr=lowerCAmelCase__ )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = sorted(set(self.languages)) if self.languages else None
lowerCAmelCase = len(self.languages) if self.languages else None
def __call__( self):
"""simple docstring"""
return pa.struct({"""language""": pa.list_(pa.string()), """translation""": pa.list_(pa.string())})
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = set(self.languages)
if self.languages and set(__lowerCAmelCase) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__lowerCAmelCase) - lang_set))}) are not in valid set ({', '.join(__lowerCAmelCase)}).")
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCAmelCase = []
for lang, text in translation_dict.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
lowerCAmelCase , lowerCAmelCase = zip(*sorted(__lowerCAmelCase))
return {"language": languages, "translation": translations}
def a_ ( self):
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""")),
"translation": Sequence(Value("""string""")),
}
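# Editor's worked example for flatten() above: an input such as
# {"en": "the cat", "fr": ["le chat", "la chatte"]} is split into per-language
# tuples and re-sorted by language code, giving
# {"language": ("en", "fr", "fr"), "translation": ("the cat", "le chat", "la chatte")}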
| 272 | '''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowercase = logging.get_logger(__name__)
__lowercase = '''T5Config'''
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''mt5'''
UpperCAmelCase_ : Tuple = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = '''mt5'''
UpperCAmelCase_ : int = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''mt5'''
UpperCAmelCase_ : Union[str, Any] = MTaConfig
| 272 | 1 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def snake_case__ ( _A: int , _A: Tuple , _A: str=0 ) -> Union[str, Any]:
'''simple docstring'''
if name is None:
lowerCAmelCase = None
else:
lowerCAmelCase = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
lowerCAmelCase = fmt.format(_A )
# Print and recurse (if needed).
if isinstance(_A , _A ):
if msg is not None:
print(_A )
for k in val.keys():
recursive_print(_A , val[k] , spaces + 2 )
elif isinstance(_A , torch.Tensor ):
print(_A , """:""" , val.size() )
else:
print(_A , """:""" , _A )
def snake_case__ ( _A: Tuple , _A: List[Any] , _A: Dict , _A: Optional[Any] , _A: Union[str, Any] ) -> str:
'''simple docstring'''
lowerCAmelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase = param.view(*_A )
lowerCAmelCase = param.transpose(0 , 2 )
lowerCAmelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase = param.view(*_A )
lowerCAmelCase = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase = param.view(*_A )
return param
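# Editor's shape note for the reordering helper above: with checkpoint_version
# >= 2.0, a fused QKV weight stored as (heads, splits, head_dim, ...) is viewed,
# swapped to (splits, heads, head_dim, ...) and flattened back; e.g. 16 heads,
# 3 splits, head_dim 64 over hidden size D: view (16, 3, 64, D),
# transpose(0, 1) -> (3, 16, 64, D), then view back to the flat (3072, D) input shape.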
def snake_case__ ( _A: int , _A: List[Any] , _A: List[str] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase = {}
# old versions did not store training args
lowerCAmelCase = input_state_dict.get("""args""" , _A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict["""checkpoint_version"""]
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict["""model"""]
# The language model.
lowerCAmelCase = model["""language_model"""]
# The embeddings.
lowerCAmelCase = lm["""embedding"""]
# The word embeddings.
lowerCAmelCase = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
lowerCAmelCase = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
lowerCAmelCase = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(_A )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
lowerCAmelCase = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _A , _A )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer["""final_layernorm.weight"""]
lowerCAmelCase = transformer["""final_layernorm.bias"""]
# For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def snake_case__ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=_A , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=_A , help="""An optional config json file describing the pre-trained model.""" , )
lowerCAmelCase = parser.parse_args()
# Extract the basename.
lowerCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
lowerCAmelCase = torch.load(_A , map_location="""cpu""" )
else:
lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
lowerCAmelCase = input_state_dict.get("""args""" , _A )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = """gelu_fast"""
elif ds_args.openai_gelu:
lowerCAmelCase = """gelu_new"""
else:
lowerCAmelCase = """gelu"""
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = """gelu_new"""
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
lowerCAmelCase = convert_megatron_checkpoint(_A , _A , _A )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_A , _A )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
lowerCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}" )
else:
lowerCAmelCase = """gpt2"""
lowerCAmelCase = AutoTokenizer.from_pretrained(_A )
lowerCAmelCase = type(_A ).__name__
lowerCAmelCase = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(_A )
# Save tokenizer based on args
print(f"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_A )
# Store the state_dict to file.
lowerCAmelCase = os.path.join(_A , """pytorch_model.bin""" )
print(f"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_A , _A )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
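# Editor's usage sketch (paths are placeholders):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/checkpoint/release/mp_rank_00/model_optim_rng.pt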
| 272 | '''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''ernie_m'''
UpperCAmelCase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowerCAmelCase = 250002 , __lowerCAmelCase = 768 , __lowerCAmelCase = 12 , __lowerCAmelCase = 12 , __lowerCAmelCase = 3072 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 514 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-0_5 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = classifier_dropout
lowerCAmelCase = is_decoder
lowerCAmelCase = act_dropout
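# Editor's sketch: this is transformers' ErnieMConfig; a model can be built
# from it without downloading anything (weights are randomly initialised):
# from transformers import ErnieMConfig, ErnieMModel
# config = ErnieMConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
# model = ErnieMModel(config)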
| 272 | 1 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = '''mvp'''
UpperCAmelCase_ : Any = ['''past_key_values''']
UpperCAmelCase_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , __lowerCAmelCase=50267 , __lowerCAmelCase=1024 , __lowerCAmelCase=12 , __lowerCAmelCase=4096 , __lowerCAmelCase=16 , __lowerCAmelCase=12 , __lowerCAmelCase=4096 , __lowerCAmelCase=16 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase="gelu" , __lowerCAmelCase=1024 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=True , __lowerCAmelCase=2 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=100 , __lowerCAmelCase=800 , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = d_model
lowerCAmelCase = encoder_ffn_dim
lowerCAmelCase = encoder_layers
lowerCAmelCase = encoder_attention_heads
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = encoder_layerdrop
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = classifier_dropout
lowerCAmelCase = use_cache
lowerCAmelCase = encoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = use_prompt
lowerCAmelCase = prompt_length
lowerCAmelCase = prompt_mid_dim
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , forced_eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __lowerCAmelCase):
lowerCAmelCase = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"""The config can simply be saved and uploaded again to be fixed.""")
| 272 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowercase = logging.getLogger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Any = '''sequence-classification'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
if type(__lowerCAmelCase) == dict:
lowerCAmelCase = Namespace(**__lowerCAmelCase)
lowerCAmelCase = glue_output_modes[hparams.task]
lowerCAmelCase = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return self.model(**__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase = outputs[0]
lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""]
lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.hparams
lowerCAmelCase = processors[args.task]()
lowerCAmelCase = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowerCAmelCase = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowerCAmelCase = convert_examples_to_features(
__lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __lowerCAmelCase)
torch.save(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False):
"""simple docstring"""
lowerCAmelCase = """dev""" if mode == """test""" else mode
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
lowerCAmelCase = torch.load(__lowerCAmelCase)
lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = outputs[:2]
lowerCAmelCase = logits.detach().cpu().numpy()
lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = np.argmax(__lowerCAmelCase , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = np.squeeze(__lowerCAmelCase)
lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)}
lowerCAmelCase = dict(results.items())
lowerCAmelCase = results
return ret, preds_list, out_label_list
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def a_ ( __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase)
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def snake_case__ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser()
add_generic_args(_A , os.getcwd() )
lowerCAmelCase = GLUETransformer.add_model_specific_args(_A , os.getcwd() )
lowerCAmelCase = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCAmelCase = os.path.join(
"""./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
lowerCAmelCase = GLUETransformer(_A )
lowerCAmelCase = generic_train(_A , _A )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=_A ) )
lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_A )
if __name__ == "__main__":
main()
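# Editor's usage sketch: the script file name and the generic flags are
# assumptions (they come from the lightning_base helpers imported above);
# --task, --gpus and --overwrite_cache are defined in the parser just above.
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results --gpus 1 --do_predict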
| 272 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowercase = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCAmelCase_ : Optional[int] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCAmelCase_ : Optional[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = ZeroShotClassificationPipeline(
model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , candidate_labels=["""polics""", """health"""])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""")
self.assertEqual(__lowerCAmelCase , {"""sequence""": ANY(__lowerCAmelCase), """labels""": [ANY(__lowerCAmelCase)], """scores""": [ANY(__lowerCAmelCase)]})
# No kwarg
lowerCAmelCase = classifier("""Who are you voting for in 2020?""" , ["""politics"""])
self.assertEqual(__lowerCAmelCase , {"""sequence""": ANY(__lowerCAmelCase), """labels""": [ANY(__lowerCAmelCase)], """scores""": [ANY(__lowerCAmelCase)]})
lowerCAmelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""])
self.assertEqual(__lowerCAmelCase , {"""sequence""": ANY(__lowerCAmelCase), """labels""": [ANY(__lowerCAmelCase)], """scores""": [ANY(__lowerCAmelCase)]})
lowerCAmelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""")
self.assertEqual(
__lowerCAmelCase , {"""sequence""": ANY(__lowerCAmelCase), """labels""": [ANY(__lowerCAmelCase), ANY(__lowerCAmelCase)], """scores""": [ANY(__lowerCAmelCase), ANY(__lowerCAmelCase)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""])) , 1.0)
lowerCAmelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""])
self.assertEqual(
__lowerCAmelCase , {"""sequence""": ANY(__lowerCAmelCase), """labels""": [ANY(__lowerCAmelCase), ANY(__lowerCAmelCase)], """scores""": [ANY(__lowerCAmelCase), ANY(__lowerCAmelCase)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""])) , 1.0)
lowerCAmelCase = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""")
self.assertEqual(__lowerCAmelCase , {"""sequence""": ANY(__lowerCAmelCase), """labels""": [ANY(__lowerCAmelCase)], """scores""": [ANY(__lowerCAmelCase)]})
# https://github.com/huggingface/transformers/issues/13846
lowerCAmelCase = classifier(["""I am happy"""] , ["""positive""", """negative"""])
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase), """labels""": [ANY(__lowerCAmelCase), ANY(__lowerCAmelCase)], """scores""": [ANY(__lowerCAmelCase), ANY(__lowerCAmelCase)]}
for i in range(1)
] , )
lowerCAmelCase = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""])
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase), """labels""": [ANY(__lowerCAmelCase), ANY(__lowerCAmelCase)], """scores""": [ANY(__lowerCAmelCase), ANY(__lowerCAmelCase)]}
for i in range(2)
] , )
with self.assertRaises(__lowerCAmelCase):
classifier("""""" , candidate_labels="""politics""")
with self.assertRaises(__lowerCAmelCase):
classifier(__lowerCAmelCase , candidate_labels="""politics""")
with self.assertRaises(__lowerCAmelCase):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""")
with self.assertRaises(__lowerCAmelCase):
classifier("""Who are you voting for in 2020?""" , candidate_labels=__lowerCAmelCase)
with self.assertRaises(__lowerCAmelCase):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(__lowerCAmelCase):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=__lowerCAmelCase , )
self.run_entailment_id(__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = zero_shot_classifier.model.config
lowerCAmelCase = config.labelaid
lowerCAmelCase = zero_shot_classifier.entailment_id
lowerCAmelCase = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1)
lowerCAmelCase = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0)
lowerCAmelCase = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0)
lowerCAmelCase = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2)
lowerCAmelCase = original_labelaid
self.assertEqual(__lowerCAmelCase , zero_shot_classifier.entailment_id)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""])
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
lowerCAmelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
nested_simplify(__lowerCAmelCase) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@require_tf
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
lowerCAmelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
nested_simplify(__lowerCAmelCase) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""")
lowerCAmelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
nested_simplify(__lowerCAmelCase) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
lowerCAmelCase = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__lowerCAmelCase , )
self.assertEqual(
nested_simplify(__lowerCAmelCase) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""")
lowerCAmelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
nested_simplify(__lowerCAmelCase) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
lowerCAmelCase = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__lowerCAmelCase , )
self.assertEqual(
nested_simplify(__lowerCAmelCase) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
| 272 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
| 272 | 1 |
'''simple docstring'''
def solution( _A: int = 2000000 ) -> int:
    '''simple docstring'''
    primality_list = [0 for i in range(_A + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(_A**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , _A + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(_A ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'{solution() = }')
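# Quick sanity check (tiny assumed input, not from the source): the primes
# below 10 are 2, 3, 5 and 7, so the sieve should report 17.
assert solution(10 ) == 17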
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_instructblip'''] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
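# The structure above feeds transformers' _LazyModule, which defers the heavy
# torch imports until an attribute is first touched. A minimal standalone
# sketch of the same idea (illustrative only -- not the actual _LazyModule
# implementation; it assumes absolute submodule names):
import importlib
import types
class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._demo_import_structure = import_structure
    def __getattr__(self, attr):
        # Resolve the owning submodule on first access, then cache the result.
        for submodule, names in self._demo_import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(submodule), attr)
                setattr(self, attr, value)
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")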
| 272 | 1 |
'''simple docstring'''
def gcd( a: int , b: int ) -> int:
    '''simple docstring'''
    while a != 0:
        a , b = b % a, a
    return b
def snake_case__ ( a: int , m: int ) -> int:
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg )
    ua , ub , uc = 1, 0, a
    va , vb , vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va , vb , vc , ua , ub , uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
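# Usage sketch (illustrative values, not from the source): the inverse exists
# only when gcd(a, m) == 1; 15 is the inverse of 7 modulo 26 because
# (7 * 15) % 26 == 1.
assert snake_case__(7 , 26 ) == 15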
| 272 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (3, 32, 128)
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
lowerCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1))
return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 272 | 1 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def snake_case__ ( _A: str ) -> str:
    '''simple docstring'''
    if not _A:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(_A[0] , _A[0] ) + _A[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
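# Behaviour sketch (assumed inputs): only the first character is upper-cased;
# a non-alphabetic leading character passes through unchanged.
assert snake_case__("""hello world""") == """Hello world"""
assert snake_case__("""123 abc""") == """123 abc"""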
| 272 | '''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = XLMRobertaTokenizer
UpperCAmelCase_ : int = XLMRobertaTokenizerFast
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Optional[int] = True
def a_ ( self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """<pad>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(__lowerCAmelCase) , 1002)
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
@cached_property
def a_ ( self):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""")
def a_ ( self):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCAmelCase , f.name)
lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase)
lowerCAmelCase = pickle.dumps(__lowerCAmelCase)
pickle.loads(__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCAmelCase = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''ernie_m'''
UpperCAmelCase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowerCAmelCase = 250002 , __lowerCAmelCase = 768 , __lowerCAmelCase = 12 , __lowerCAmelCase = 12 , __lowerCAmelCase = 3072 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 514 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-0_5 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = classifier_dropout
lowerCAmelCase = is_decoder
lowerCAmelCase = act_dropout
| 272 | '''simple docstring'''
def gcd( a: int , b: int ) -> int:
    '''simple docstring'''
    while a != 0:
        a , b = b % a, a
    return b
def snake_case__ ( a: int , m: int ) -> int:
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg )
    ua , ub , uc = 1, 0, a
    va , vb , vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va , vb , vc , ua , ub , uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
| 272 | 1 |
'''simple docstring'''
from collections import namedtuple
__lowercase = namedtuple('''from_to''', '''from_ to''')
__lowercase = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 1_0_0_0),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00_454, 264.172),
'''cubicyard''': from_to(0.76_455, 1.30_795),
'''cubicfoot''': from_to(0.028, 35.3_147),
'''cup''': from_to(0.000_236_588, 4_226.75),
}
def snake_case__ ( value: float , from_type: str , to_type: str ) -> float:
    '''simple docstring'''
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + """, """.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + """, """.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
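# Usage sketch (constants taken from the METRIC_CONVERSION table above): four
# US gallons in cubic metres is 4 * 0.00454 * 1 = 0.01816.
assert abs(snake_case__(4 , """gallon""" , """cubicmeter""") - 0.01816) < 1e-9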
| 272 | '''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps: jnp.ndarray , embedding_dim: int , freq_shift: float = 1 , min_timescale: float = 1 , max_timescale: float = 1.0e4 , flip_sin_to_cos: bool = False , scale: float = 1.0 , ) -> jnp.ndarray:
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class a__( nn.Module ):
    '''simple docstring'''
    time_embed_dim: int = 3_2
    dtype: jnp.dtype = jnp.float32
    @nn.compact
    def __call__( self , __lowerCAmelCase):
        """simple docstring"""
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""")(__lowerCAmelCase)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""")(temb)
        return temb
class a__( nn.Module ):
    '''simple docstring'''
    dim: int = 3_2
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
    @nn.compact
    def __call__( self , __lowerCAmelCase):
        """simple docstring"""
        return get_sinusoidal_embeddings(
            __lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
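# Shape sketch (assumed toy values): four timesteps at dim=32 give a (4, 32)
# array -- 16 sine features concatenated with 16 cosine features per timestep
# (reversed when flip_sin_to_cos is set).
demo_emb = get_sinusoidal_embeddings(jnp.arange(4 , dtype=jnp.float32 ) , 32 )
assert demo_emb.shape == (4, 32)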
| 272 | 1 |
'''simple docstring'''
__lowercase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowercase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowercase = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def snake_case__ ( year: int , month: int , day: int ) -> str:
    '''simple docstring'''
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
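# Worked example (well-known date, easy to check by hand): 1 January 2000 fell
# on a Saturday; this also exercises the century-leap branch (2000 % 400 == 0).
assert snake_case__(2000 , 1 , 1 ) == """Saturday"""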
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_nezha'''] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 1 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__lowercase = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__lowercase = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
__lowercase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
__lowercase = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
__lowercase = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
__lowercase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
__lowercase = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
__lowercase = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase_ : Tuple = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : int = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Tuple = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase_ : Optional[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Dict = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : List[str] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowercase = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
__lowercase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
__lowercase = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(lowerCAmelCase__ )
class a__:
'''simple docstring'''
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
elif titles is None or texts is None:
lowerCAmelCase = titles if texts is None else texts
return super().__call__(
__lowerCAmelCase , __lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = titles if not isinstance(__lowerCAmelCase , __lowerCAmelCase) else [titles]
lowerCAmelCase = texts if not isinstance(__lowerCAmelCase , __lowerCAmelCase) else [texts]
lowerCAmelCase = len(__lowerCAmelCase)
lowerCAmelCase = questions if not isinstance(__lowerCAmelCase , __lowerCAmelCase) else [questions] * n_passages
if len(__lowerCAmelCase) != len(__lowerCAmelCase):
raise ValueError(
f"There should be as many titles than texts but got {len(__lowerCAmelCase)} titles and {len(__lowerCAmelCase)} texts.")
lowerCAmelCase = super().__call__(__lowerCAmelCase , __lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase)["""input_ids"""]
lowerCAmelCase = super().__call__(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase)["""input_ids"""]
lowerCAmelCase = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__lowerCAmelCase , __lowerCAmelCase)
]
}
if return_attention_mask is not False:
lowerCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
lowerCAmelCase = attention_mask
return self.pad(__lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors=__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = 64 , __lowerCAmelCase = 4 , ):
"""simple docstring"""
lowerCAmelCase = reader_input["""input_ids"""]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = reader_output[:3]
lowerCAmelCase = len(__lowerCAmelCase)
lowerCAmelCase = sorted(range(__lowerCAmelCase) , reverse=__lowerCAmelCase , key=relevance_logits.__getitem__)
lowerCAmelCase = []
for doc_id in sorted_docs:
lowerCAmelCase = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase = sequence_ids.index(self.pad_token_id)
else:
lowerCAmelCase = len(__lowerCAmelCase)
lowerCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCAmelCase , top_spans=__lowerCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCAmelCase , start_index=__lowerCAmelCase , end_index=__lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(__lowerCAmelCase) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = []
for start_index, start_score in enumerate(__lowerCAmelCase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
lowerCAmelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase: x[1] , reverse=__lowerCAmelCase)
lowerCAmelCase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
lowerCAmelCase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}")
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(__lowerCAmelCase) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCAmelCase__ )
class a__( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Any = VOCAB_FILES_NAMES
UpperCAmelCase_ : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : List[str] = READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
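# Layout sketch (toy ids assumed purely for illustration): for each passage,
# the reader encoding built in __call__ above concatenates
#     [CLS] <question ids> [SEP] <title ids> [SEP] <text ids>
# truncated to max_length when truncation is on, and the attention mask simply
# marks every non-pad position:
#     input_ids      = [101, 2054, 2003, 102, 4489, 102, 3793, 0, 0]
#     attention_mask = [  1,    1,    1,   1,    1,   1,    1, 0, 0]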
| 272 | '''simple docstring'''
from math import sqrt
def snake_case__ ( _A: int = 1000000 ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_A , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
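# Sanity check lifted from the Project Euler 86 statement this mirrors: the
# cumulative cuboid count first exceeds 2000 at M = 100 (it is 1975 at M = 99),
# so a limit of 2000 should return 100.
assert solution(2000 ) == 100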
| 272 | 1 |
'''simple docstring'''
def odd_even_transposition( arr: list ) -> list:
    '''simple docstring'''
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i] , arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(1_0, 0, -1))
    print(f'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
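# Quick check with duplicates (assumed input): n alternating passes fully sort
# an n-element list, odd-indexed comparisons interleaving with even-indexed ones.
assert odd_even_transposition([3 , 1 , 3 , 2 ]) == [1 , 2 , 3 , 3 ]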
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_resnet'''] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 272 | 1 |
'''simple docstring'''
def hexagonal_numbers( length: int ) -> list[int]:
    '''simple docstring'''
    if not isinstance(length , int ) or length <= 0:
        raise ValueError("""Length must be a positive integer.""" )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=1_0))
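# The first five values printed above are [0, 1, 6, 15, 28]: h_n = n * (2n - 1)
# counted from n = 0, as the list comprehension makes explicit.
assert hexagonal_numbers(5 ) == [0 , 1 , 6 , 15 , 28 ]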
| 272 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean
lowerCAmelCase = image_std
def a_ ( self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = DPTImageProcessingTester(self)
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean"""))
self.assertTrue(hasattr(__lowerCAmelCase , """image_std"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """size"""))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 272 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--repo_path''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the architecture.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    config_parameters_to_change = {
        '''image_size''': '''sample_size''',
        '''num_res_blocks''': '''layers_per_block''',
        '''block_channels''': '''block_out_channels''',
        '''down_blocks''': '''down_block_types''',
        '''up_blocks''': '''up_block_types''',
        '''downscale_freq_shift''': '''freq_shift''',
        '''resnet_num_groups''': '''norm_num_groups''',
        '''resnet_act_fn''': '''act_fn''',
        '''resnet_eps''': '''norm_eps''',
        '''num_head_channels''': '''attention_head_dim''',
    }
    key_parameters_to_change = {
        '''time_steps''': '''time_proj''',
        '''mid''': '''mid_block''',
        '''downsample_blocks''': '''down_blocks''',
        '''upsample_blocks''': '''up_blocks''',
    }
    subfolder = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
    with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        text = reader.read()
    config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, '''config.json'''):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config['''down_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
        config['''up_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('''.''')[0] == key:
                    new_state_dict['''.'''.join([new_key] + param_key.split('''.''')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 272 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance( emb_a , emb_b , eps=1e-12 ) -> jnp.ndarray:
    '''simple docstring'''
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
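# Quick check (assumed toy vectors): rows are L2-normalised inside the helper,
# so two identical directions score 1.0 regardless of magnitude.
demo_emb = jnp.array([[1.0 , 0.0], [0.0 , 2.0]])
assert jnp.allclose(jax_cosine_distance(demo_emb , demo_emb ) , jnp.eye(2 ))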
class FlaxStableDiffusionSafetyCheckerModule( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : CLIPConfig
UpperCAmelCase_ : jnp.dtype = jnp.floataa
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config)
lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype)
lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim))
lowerCAmelCase = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim))
lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,))
lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,))
    def __call__( self , clip_input):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3)
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1)
        return has_nsfw_concepts
class a__( FlaxPreTrainedModel ):
'''simple docstring'''
    config_class = CLIPConfig
    main_input_name = '''clip_input'''
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self , config: CLIPConfig , input_shape: Optional[Tuple] = None , seed: int = 0 , dtype: jnp.dtype = jnp.floataa , _do_init: bool = True , **kwargs , ):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs)
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init)
    def init_weights( self , rng , input_shape , params = None):
        """simple docstring"""
        clip_input = jax.random.normal(rng , input_shape)
        params_rng , dropout_rng = jax.random.split(rng)
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        random_params = self.module.init(rngs , clip_input)["""params"""]
        return random_params
    def __call__( self , clip_input , params = None , ):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1))
        return self.module.apply(
            {"""params""": params or self.params} , jnp.array(clip_input , dtype=jnp.floataa) , rngs={} , )
| 272 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        '''generator''',
        '''return_dict''',
        '''decoder_num_inference_steps''',
        '''super_res_num_inference_steps''',
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self):
"""simple docstring"""
return 32
@property
    def time_input_dim( self):
"""simple docstring"""
return 32
@property
    def block_out_channels_a( self):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim( self):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim( self):
"""simple docstring"""
return 100
@property
    def dummy_tokenizer( self):
"""simple docstring"""
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
return tokenizer
@property
    def dummy_text_encoder( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowerCAmelCase)
@property
    def dummy_image_encoder( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__lowerCAmelCase)
@property
    def dummy_text_proj( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
lowerCAmelCase = UnCLIPTextProjModel(**__lowerCAmelCase)
return model
@property
    def dummy_decoder( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
        # Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
lowerCAmelCase = UNetaDConditionModel(**__lowerCAmelCase)
return model
@property
    def dummy_super_res_kwargs( self):
"""simple docstring"""
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
    def dummy_super_res_first( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs)
return model
@property
    def dummy_super_res_last( self):
"""simple docstring"""
torch.manual_seed(1)
lowerCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs)
return model
    def get_dummy_components( self):
"""simple docstring"""
lowerCAmelCase = self.dummy_decoder
lowerCAmelCase = self.dummy_text_proj
lowerCAmelCase = self.dummy_text_encoder
lowerCAmelCase = self.dummy_tokenizer
lowerCAmelCase = self.dummy_super_res_first
lowerCAmelCase = self.dummy_super_res_last
lowerCAmelCase = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
lowerCAmelCase = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
lowerCAmelCase = CLIPImageProcessor(crop_size=32 , size=32)
lowerCAmelCase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs( self , device , seed=0 , pil_image=True):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase)).to(__lowerCAmelCase)
if str(__lowerCAmelCase).startswith("""mps"""):
lowerCAmelCase = torch.manual_seed(__lowerCAmelCase)
else:
lowerCAmelCase = torch.Generator(device=__lowerCAmelCase).manual_seed(__lowerCAmelCase)
if pil_image:
lowerCAmelCase = input_image * 0.5 + 0.5
lowerCAmelCase = input_image.clamp(0 , 1)
lowerCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
lowerCAmelCase = DiffusionPipeline.numpy_to_pil(__lowerCAmelCase)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**__lowerCAmelCase)
lowerCAmelCase = pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase)
lowerCAmelCase = pipe(**__lowerCAmelCase)
lowerCAmelCase = output.images
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase)
lowerCAmelCase = pipe(
**__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**__lowerCAmelCase)
lowerCAmelCase = pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase)
lowerCAmelCase = pipe(**__lowerCAmelCase)
lowerCAmelCase = output.images
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase)
lowerCAmelCase = pipe(
**__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**__lowerCAmelCase)
lowerCAmelCase = pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase)
lowerCAmelCase = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
lowerCAmelCase = pipe(**__lowerCAmelCase)
lowerCAmelCase = output.images
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase)
lowerCAmelCase = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
lowerCAmelCase = pipe(
**__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = torch.device("""cpu""")
        class DummyScheduler:
            '''simple docstring'''
            init_noise_sigma = 1
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**__lowerCAmelCase)
lowerCAmelCase = pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.Generator(device=__lowerCAmelCase).manual_seed(0)
lowerCAmelCase = pipe.decoder.dtype
lowerCAmelCase = 1
lowerCAmelCase = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase = pipe.prepare_latents(
__lowerCAmelCase , dtype=__lowerCAmelCase , device=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , scheduler=DummyScheduler())
lowerCAmelCase = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase = pipe.prepare_latents(
__lowerCAmelCase , dtype=__lowerCAmelCase , device=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , scheduler=DummyScheduler())
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase)
lowerCAmelCase = pipe(
**__lowerCAmelCase , decoder_latents=__lowerCAmelCase , super_res_latents=__lowerCAmelCase).images
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase)
# Don't pass image, instead pass embedding
lowerCAmelCase = pipeline_inputs.pop("""image""")
lowerCAmelCase = pipe.image_encoder(__lowerCAmelCase).image_embeds
lowerCAmelCase = pipe(
**__lowerCAmelCase , decoder_latents=__lowerCAmelCase , super_res_latents=__lowerCAmelCase , image_embeddings=__lowerCAmelCase , ).images
        # make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a).max() < 1E-4
@skip_mps
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = torch_device == """cpu"""
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowerCAmelCase = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__lowerCAmelCase , expected_max_diff=__lowerCAmelCase)
@skip_mps
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = torch_device == """cpu"""
lowerCAmelCase = True
lowerCAmelCase = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=__lowerCAmelCase , relax_max_difference=__lowerCAmelCase , additional_params_copy_to_batched_inputs=__lowerCAmelCase , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__lowerCAmelCase , additional_params_copy_to_batched_inputs=__lowerCAmelCase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__lowerCAmelCase)
@skip_mps
def a_ ( self):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a_ ( self):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def a_ ( self):
"""simple docstring"""
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""")
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""")
lowerCAmelCase = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa)
lowerCAmelCase = pipeline.to(__lowerCAmelCase)
pipeline.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.Generator(device="""cpu""").manual_seed(0)
lowerCAmelCase = pipeline(
__lowerCAmelCase , generator=__lowerCAmelCase , output_type="""np""" , )
lowerCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase , 15)
| 272 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class a__( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
def a_ ( self):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(__lowerCAmelCase))
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
# Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 1 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0.2 , __lowerCAmelCase=0.2):
"""simple docstring"""
lowerCAmelCase = bp_numa
lowerCAmelCase = bp_numa
lowerCAmelCase = bp_numa
lowerCAmelCase = conva_get[:2]
lowerCAmelCase = conva_get[2]
lowerCAmelCase = size_pa
lowerCAmelCase = rate_w
lowerCAmelCase = rate_t
lowerCAmelCase = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
lowerCAmelCase = -2 * np.random.rand(self.conva[1]) + 1
lowerCAmelCase = -2 * np.random.rand(self.num_bpa) + 1
lowerCAmelCase = -2 * np.random.rand(self.num_bpa) + 1
    def save_model( self , save_path):
"""simple docstring"""
lowerCAmelCase = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(__lowerCAmelCase , """wb""") as f:
pickle.dump(__lowerCAmelCase , __lowerCAmelCase)
print(f"Model saved: {save_path}")
@classmethod
def a_ ( cls , __lowerCAmelCase):
"""simple docstring"""
with open(__lowerCAmelCase , """rb""") as f:
lowerCAmelCase = pickle.load(__lowerCAmelCase) # noqa: S301
lowerCAmelCase = model_dic.get("""conv1""")
conv_get.append(model_dic.get("""step_conv1"""))
lowerCAmelCase = model_dic.get("""size_pooling1""")
lowerCAmelCase = model_dic.get("""num_bp1""")
lowerCAmelCase = model_dic.get("""num_bp2""")
lowerCAmelCase = model_dic.get("""num_bp3""")
lowerCAmelCase = model_dic.get("""rate_weight""")
lowerCAmelCase = model_dic.get("""rate_thre""")
# create model instance
lowerCAmelCase = CNN(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# modify model parameter
lowerCAmelCase = model_dic.get("""w_conv1""")
lowerCAmelCase = model_dic.get("""wkj""")
lowerCAmelCase = model_dic.get("""vji""")
lowerCAmelCase = model_dic.get("""thre_conv1""")
lowerCAmelCase = model_dic.get("""thre_bp2""")
lowerCAmelCase = model_dic.get("""thre_bp3""")
return conv_ins
    def sig( self , x):
        """simple docstring"""
        return 1 / (1 + np.exp(-1 * x))
    def do_round( self , x):
        """simple docstring"""
        return round(x , 3)
    def convolute( self , data , convs , w_convs , thre_convs , conv_step):
"""simple docstring"""
lowerCAmelCase = convs[0]
lowerCAmelCase = convs[1]
lowerCAmelCase = np.shape(__lowerCAmelCase)[0]
# get the data slice of original image data, data_focus
lowerCAmelCase = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCAmelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCAmelCase):
lowerCAmelCase = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCAmelCase)
        # calculate the feature map of every single kernel, and save it as a list of matrices
lowerCAmelCase = []
lowerCAmelCase = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCAmelCase):
lowerCAmelCase = []
for i_focus in range(len(__lowerCAmelCase)):
lowerCAmelCase = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCAmelCase))
lowerCAmelCase = np.asmatrix(__lowerCAmelCase).reshape(
__lowerCAmelCase , __lowerCAmelCase)
data_featuremap.append(__lowerCAmelCase)
        # expanding the data slice to one dimension
lowerCAmelCase = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCAmelCase))
lowerCAmelCase = np.asarray(__lowerCAmelCase)
return focus_list, data_featuremap
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="average_pool"):
"""simple docstring"""
lowerCAmelCase = len(featuremaps[0])
lowerCAmelCase = int(size_map / size_pooling)
lowerCAmelCase = []
for i_map in range(len(__lowerCAmelCase)):
lowerCAmelCase = featuremaps[i_map]
lowerCAmelCase = []
for i_focus in range(0 , __lowerCAmelCase , __lowerCAmelCase):
for j_focus in range(0 , __lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCAmelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCAmelCase))
lowerCAmelCase = np.asmatrix(__lowerCAmelCase).reshape(__lowerCAmelCase , __lowerCAmelCase)
featuremap_pooled.append(__lowerCAmelCase)
return featuremap_pooled
    def _expand( self , data):
"""simple docstring"""
lowerCAmelCase = []
for i in range(len(__lowerCAmelCase)):
lowerCAmelCase = np.shape(data[i])
lowerCAmelCase = data[i].reshape(1 , shapes[0] * shapes[1])
lowerCAmelCase = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCAmelCase)
lowerCAmelCase = np.asarray(__lowerCAmelCase)
return data_expanded
    def _expand_mat( self , data_mat):
"""simple docstring"""
lowerCAmelCase = np.asarray(__lowerCAmelCase)
lowerCAmelCase = np.shape(__lowerCAmelCase)
lowerCAmelCase = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
    def _calculate_gradient_from_pool( self , out_map , pd_pool , num_map , size_map , size_pooling):
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = 0
for i_map in range(__lowerCAmelCase):
lowerCAmelCase = np.ones((size_map, size_map))
for i in range(0 , __lowerCAmelCase , __lowerCAmelCase):
for j in range(0 , __lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = pd_pool[
i_pool
]
lowerCAmelCase = i_pool + 1
lowerCAmelCase = np.multiply(
__lowerCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCAmelCase)
return pd_all
    def train( self , patterns , datas_train , datas_teach , n_repeat , error_accuracy , draw_e=bool):
"""simple docstring"""
print("""----------------------Start Training-------------------------""")
print((""" - - Shape: Train_Data """, np.shape(__lowerCAmelCase)))
print((""" - - Shape: Teach_Data """, np.shape(__lowerCAmelCase)))
lowerCAmelCase = 0
lowerCAmelCase = []
lowerCAmelCase = 10000
while rp < n_repeat and mse >= error_accuracy:
lowerCAmelCase = 0
print(f"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCAmelCase)):
# print('------------Learning Image: %d--------------'%p)
lowerCAmelCase = np.asmatrix(datas_train[p])
lowerCAmelCase = np.asarray(datas_teach[p])
lowerCAmelCase , lowerCAmelCase = self.convolute(
__lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase = self.pooling(__lowerCAmelCase , self.size_poolinga)
lowerCAmelCase = np.shape(__lowerCAmelCase)
lowerCAmelCase = self._expand(__lowerCAmelCase)
lowerCAmelCase = data_bp_input
lowerCAmelCase = np.dot(__lowerCAmelCase , self.vji.T) - self.thre_bpa
lowerCAmelCase = self.sig(__lowerCAmelCase)
lowerCAmelCase = np.dot(__lowerCAmelCase , self.wkj.T) - self.thre_bpa
lowerCAmelCase = self.sig(__lowerCAmelCase)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCAmelCase = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCAmelCase , (1 - bp_outa)))
lowerCAmelCase = np.multiply(
np.dot(__lowerCAmelCase , self.wkj) , np.multiply(__lowerCAmelCase , (1 - bp_outa)))
lowerCAmelCase = np.dot(__lowerCAmelCase , self.vji)
lowerCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCAmelCase = pd_conva_pooled.T.getA().tolist()
lowerCAmelCase = self._calculate_gradient_from_pool(
__lowerCAmelCase , __lowerCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
lowerCAmelCase = self._expand_mat(pd_conva_all[k_conv])
lowerCAmelCase = self.rate_weight * np.dot(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
lowerCAmelCase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
lowerCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre
lowerCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCAmelCase = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCAmelCase = rp + 1
lowerCAmelCase = error_count / patterns
all_mse.append(__lowerCAmelCase)
def draw_error():
lowerCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCAmelCase , """+-""")
plt.plot(__lowerCAmelCase , """r--""")
plt.xlabel("""Learning Times""")
plt.ylabel("""All_mse""")
plt.grid(__lowerCAmelCase , alpha=0.5)
plt.show()
print("""------------------Training Complished---------------------""")
print((""" - - Training epoch: """, rp, f" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
    def predict( self , datas_test):
"""simple docstring"""
lowerCAmelCase = []
print("""-------------------Start Testing-------------------------""")
print((""" - - Shape: Test_Data """, np.shape(__lowerCAmelCase)))
for p in range(len(__lowerCAmelCase)):
lowerCAmelCase = np.asmatrix(datas_test[p])
lowerCAmelCase , lowerCAmelCase = self.convolute(
__lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase = self.pooling(__lowerCAmelCase , self.size_poolinga)
lowerCAmelCase = self._expand(__lowerCAmelCase)
lowerCAmelCase = data_bp_input
lowerCAmelCase = bp_outa * self.vji.T - self.thre_bpa
lowerCAmelCase = self.sig(__lowerCAmelCase)
lowerCAmelCase = bp_outa * self.wkj.T - self.thre_bpa
lowerCAmelCase = self.sig(__lowerCAmelCase)
produce_out.extend(bp_outa.getA().tolist())
lowerCAmelCase = [list(map(self.do_round , __lowerCAmelCase)) for each in produce_out]
return np.asarray(__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = np.asmatrix(__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = self.convolute(
__lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase = self.pooling(__lowerCAmelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
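    # A minimal numpy sketch of the average pooling performed by the CNN class
    # above; the demo_* names and the 4x4 map / 2x2 pool sizes are illustrative
    # assumptions, not part of the original code.
    demo_map = np.arange(16, dtype=float).reshape(4, 4)
    demo_pooled = demo_map.reshape(2, 2, 2, 2).mean(axis=(1, 3))
    print(demo_pooled)  # [[ 2.5  4.5] [10.5 12.5]], one mean per 2x2 block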
| 272 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType( enum.Enum ):
    '''simple docstring'''
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a__( Pipeline ):
'''simple docstring'''
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params , forward_params , _ = self._sanitize_parameters(prefix=prefix , **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
        """simple docstring"""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["""prefix"""] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework)
            generate_kwargs["""prefix_length"""] = prefix_inputs["""input_ids"""].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    """ [None, 'hole']""")
            preprocess_params["""handle_long_generation"""] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
            if return_tensors is not None:
                raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""")
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs):
        """simple docstring"""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"""add_space_before_punct_symbol""": True})
        return super()._parse_and_tokenize(*args , **kwargs)
    def __call__( self , text_inputs , **kwargs):
        """simple docstring"""
        return super().__call__(text_inputs , **kwargs)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
| 272 | 1 |
'''simple docstring'''
import functools
def snake_case__ ( days: list[int] , costs: list[int] ) -> int:
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days ) >= 366:
        raise ValueError("""All days elements should be less than 366""" )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index: int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
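    # Worked example (assumed inputs, the classic "minimum cost for tickets"
    # case): a 1-day pass on day 1, a 7-day pass covering days 4-10, and a
    # 1-day pass on day 20 cost 2 + 7 + 2 = 11 in total.
    print(snake_case__([1, 4, 6, 7, 8, 20] , [2, 7, 15]))  # -> 11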
| 272 | '''simple docstring'''
def z_function(input_str: str ) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if the new index's result extends the interval further to the right,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
return z_result
def go_next(i: int , z_result: list[int] , s: str ) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str , input_str: str ) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # on the concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
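    # Worked example (assumed inputs): z_function("abacaba") is
    # [0, 0, 1, 0, 3, 0, 1]; find_pattern counts the two occurrences of "aba".
    print(z_function("""abacaba"""))  # -> [0, 0, 1, 0, 3, 0, 1]
    print(find_pattern("""aba""" , """abacaba"""))  # -> 2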
| 272 | 1 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000 , t_max: int = 10 ) -> int:
    '''simple docstring'''
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= t_max )
if __name__ == "__main__":
print(f'{solution() = }')
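    # Descriptive note: solution() counts square laminae (hollow square tile
    # arrangements using outer_width**2 - hole_width**2 tiles) and returns how
    # many tile budgets up to t_limit can be formed in between 1 and t_max
    # distinct ways, in the style of Project Euler problem 174.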
| 272 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class a__( ProcessorMixin ):
'''simple docstring'''
    feature_extractor_class = '''EncodecFeatureExtractor'''
    tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self , feature_extractor , tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase)
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if text is not None:
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase)
if audio is not None:
lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
lowerCAmelCase = audio_inputs["""padding_mask"""]
return inputs
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase)
else:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase)
    def _decode_audio( self , audio_values , padding_mask = None):
        """simple docstring"""
        audio_values = to_numpy(audio_values)
        bsz , channels , seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , """constant""" , constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1)
        return audio_values
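    # Descriptive note: _decode_audio pads each mask with the *non-padding*
    # value for the extra generated samples, then keeps only the positions
    # whose mask differs from the padding value, so every returned waveform is
    # trimmed back to its own true length.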
| 272 | 1 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowercase = logging.get_logger(__name__)
__lowercase = '''T5Config'''
class a__( TFTaModel ):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MTaConfig
class a__( TFTaForConditionalGeneration ):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MTaConfig
class a__( TFTaEncoderModel ):
    '''simple docstring'''
    model_type = '''mt5'''
    config_class = MTaConfig
| 272 | '''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self):
"""simple docstring"""
torch.manual_seed(0)
        model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a_ ( self):
"""simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """google/ddpm-cifar10-32"""
lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 272 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class a__( DeformableDetrImageProcessor ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
| 272 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def snake_case__ ( sentence: str ) -> str:
    '''simple docstring'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 272 | 1 |
'''simple docstring'''
def solution(n: int = 1000 ) -> int:
    '''simple docstring'''
    a = 3
    result = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
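# Note: the `elif a % 15 == 0` branch above is unreachable, since any multiple
# of 15 already satisfies `a % 3 == 0`; the function simply sums the multiples
# of 3 or 5 below n, e.g. solution(10) == 3 + 5 + 6 + 9 == 23.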
if __name__ == "__main__":
print(f'{solution() = }')
| 272 | '''simple docstring'''
import os
import string
import sys
from typing import Any, List, Tuple
__lowercase = 1 << 8
__lowercase = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars( ) -> List[Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_A ) == 0:
# Read the keystroke
            ch = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(_A )
if ord(_A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase = cha[1]
else:
lowerCAmelCase = ch.decode(_A )
else:
lowerCAmelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase = sys.stdin.fileno()
lowerCAmelCase = termios.tcgetattr(_A )
try:
tty.setraw(_A )
lowerCAmelCase = sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def snake_case__ ( ) -> Tuple:
'''simple docstring'''
    char = get_raw_chars()
if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_A ) == KEYMAP["esc"]:
lowerCAmelCase = get_raw_chars()
if ord(_A ) == KEYMAP["mod_int"]:
lowerCAmelCase = get_raw_chars()
if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 272 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
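# A stripped-down illustration of the lazy-import idea behind _LazyModule above;
# this standalone version is a sketch, not transformers' actual implementation.
import importlib
from types import ModuleType

class TinyLazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(submodule, symbol)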
| 272 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ['''input_features''']
    def __init__( self , feature_size=80 , sampling_rate=16000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
    def _np_extract_fbank_features( self , waveform):
        """simple docstring"""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32)
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values
    def __call__( self , raw_speech , truncation = True , pad_to_multiple_of = None , return_tensors = None , return_attention_mask = None , padding = "max_length" , max_length = None , sampling_rate = None , do_normalize = None , **kwargs , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
lowerCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray):
lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa)
elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
lowerCAmelCase = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech]).T]
lowerCAmelCase = BatchFeature({"""input_features""": raw_speech})
# convert into correct format for padding
lowerCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0)
# make sure list is in array format
lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
lowerCAmelCase = [self._np_extract_fbank_features(__lowerCAmelCase) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCAmelCase):
lowerCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase)
return padded_inputs
    def a_ ( self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["""feature_extractor_type"""] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
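# The log-mel scaling performed by _np_extract_fbank_features above, restated
# as a standalone numpy function on a precomputed mel power spectrogram
# (illustrative sketch only).
import numpy as np

def whisper_style_log_mel(mel_power: np.ndarray) -> np.ndarray:
    log_spec = np.log10(np.clip(mel_power, 1e-10, None))
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # keep ~80 dB of dynamic range
    return (log_spec + 4.0) / 4.0  # shift/scale roughly into [-1, 1]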
| 272 | 1 |
'''simple docstring'''
import copy
import os
import cv2 as cva  # OpenCV, aliased to match the calls below
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    '''simple docstring'''
    def __init__( self):
        """simple docstring"""
        self.img = """"""
        self.original_image = """"""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch( self , input_image):
        """simple docstring"""
        self.img = cva.imread(input_image , 0)
        self.original_image = copy.deepcopy(self.img)
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("""output_data/output.jpg""" , self.img)
    def plot_histogram( self):
        """simple docstring"""
        plt.hist(self.img.ravel() , 256 , [0, 256])
    def show_image( self):
        """simple docstring"""
        cva.imshow("""Output-Image""" , self.img)
        cva.imshow("""Input-Image""" , self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')  # dirname, not basename, yields a valid path
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
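# A compact numpy restatement of the mapping the stretch() loop builds: a
# cumulative-histogram lookup table. Illustrative only, not the class above.
import numpy as np

def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()                           # running sum of p(r_k)
    mapping = np.round((levels - 1) * cdf).astype(img.dtype)   # s_k = (L - 1) * sum p
    return mapping[img]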
| 272 | '''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowercase = logging.get_logger(__name__)
__lowercase = '''T5Config'''
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''mt5'''
UpperCAmelCase_ : Tuple = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = '''mt5'''
UpperCAmelCase_ : int = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''mt5'''
UpperCAmelCase_ : Union[str, Any] = MTaConfig
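# The three stubs above reuse the TF T5 graph and only swap in MTaConfig; a
# hedged usage sketch (public class and checkpoint names assumed from upstream):
from transformers import TFMT5ForConditionalGeneration

model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")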
| 272 | 1 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
    url = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
    res = requests.get(url, headers={'''User-Agent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
    soup = BeautifulSoup(res.text, '''html.parser''')
    links = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 272 | '''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''ernie_m'''
UpperCAmelCase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowerCAmelCase = 250002 , __lowerCAmelCase = 768 , __lowerCAmelCase = 12 , __lowerCAmelCase = 12 , __lowerCAmelCase = 3072 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 514 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-0_5 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = classifier_dropout
lowerCAmelCase = is_decoder
lowerCAmelCase = act_dropout
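# A hedged construction sketch for the config above via its public upstream
# name (ErnieMConfig is assumed to be the exported alias of this class):
from transformers import ErnieMConfig

tiny = ErnieMConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
assert tiny.hidden_size == 256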
| 272 | 1 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    '''simple docstring'''
    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs):
        """simple docstring"""
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties( self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["""vocab_size"""])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop) , msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config , name , idx)
                self.parent.assertEqual(
                    getattr(config , name) , idx , msg=f"`{name} value {idx} expected, but was {getattr(config , name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config , name) , idx , msg=f"`{name} value {idx} expected, but was {getattr(config , name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string( self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value)
    def create_and_test_config_to_json_file( self):
        """simple docstring"""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """config.json""")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained( self):
        """simple docstring"""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder( self):
        """simple docstring"""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = """test"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname , subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict())
    def create_and_test_config_with_num_labels( self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict , num_labels=5)
        self.parent.assertEqual(len(config.id2label) , 5)
        self.parent.assertEqual(len(config.label2id) , 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label) , 3)
        self.parent.assertEqual(len(config.label2id) , 3)
    def check_config_can_be_init_without_params( self):
        """simple docstring"""
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init( self):
        """simple docstring"""
        config_kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**config_kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.float16))
            elif getattr(config , key) != value:
                wrong_values.append((key, getattr(config , key), value))
        if len(wrong_values) > 0:
            errors = """\n""".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests( self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
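# How the tester above is wired into a concrete unittest case (the LayoutLM
# tests later in this dump use the same pattern); BertConfig is an illustrative
# stand-in for any PretrainedConfig subclass.
import unittest
from transformers import BertConfig

class BertConfigTest(unittest.TestCase):
    def test_config(self):
        tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
        tester.run_common_tests()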
| 272 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowercase = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer ):
'''simple docstring'''
    mode = '''sequence-classification'''
    def __init__( self , hparams):
        """simple docstring"""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return self.model(**__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase = outputs[0]
lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""]
lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.hparams
lowerCAmelCase = processors[args.task]()
lowerCAmelCase = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowerCAmelCase = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowerCAmelCase = convert_examples_to_features(
__lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __lowerCAmelCase)
torch.save(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False):
"""simple docstring"""
lowerCAmelCase = """dev""" if mode == """test""" else mode
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
lowerCAmelCase = torch.load(__lowerCAmelCase)
lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = outputs[:2]
lowerCAmelCase = logits.detach().cpu().numpy()
lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = np.argmax(__lowerCAmelCase , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = np.squeeze(__lowerCAmelCase)
lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)}
lowerCAmelCase = dict(results.items())
lowerCAmelCase = results
return ret, preds_list, out_label_list
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def a_ ( __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase)
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
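# The compute-once/cache pattern used by the data-preparation method above,
# in miniature; names here are illustrative.
import os
import torch

def load_or_build(cache_path, build_fn):
    if os.path.exists(cache_path):
        return torch.load(cache_path)
    features = build_fn()
    torch.save(features, cache_path)
    return features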
| 272 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['''pixel_values''']
def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = PIL.Image.BICUBIC , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase)
lowerCAmelCase = size if size is not None else {"""height""": 256, """width""": 256}
lowerCAmelCase = get_size_dict(__lowerCAmelCase)
lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowerCAmelCase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""")
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = PIL.Image.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(__lowerCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return resize(
__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(__lowerCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return center_crop(__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(__lowerCAmelCase)
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""")
lowerCAmelCase = make_list_of_images(__lowerCAmelCase)
if not valid_images(__lowerCAmelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(__lowerCAmelCase) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase) for image in images]
lowerCAmelCase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase) for image in images]
lowerCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase)
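# The preprocessing order applied above (resize -> center-crop -> rescale ->
# normalize -> CHW), restated as a plain-numpy sketch with the same default
# mean/std of 0.5; illustrative only, resize/crop omitted for brevity.
import numpy as np

def preprocess_sketch(image: np.ndarray) -> np.ndarray:
    # image: (H, W, C) uint8
    image = image.astype(np.float32) / 255.0   # rescale
    image = (image - 0.5) / 0.5                # normalize with mean = std = 0.5
    return np.transpose(image, (2, 0, 1))      # HWC -> CHW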
| 272 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
| 272 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = CLIPConfig
UpperCAmelCase_ : int = ['''CLIPEncoderLayer''']
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase)
lowerCAmelCase = CLIPVisionModelWithProjection(config.vision_config)
lowerCAmelCase = nn.Linear(config.vision_config.projection_dim , 1)
lowerCAmelCase = nn.Linear(config.vision_config.projection_dim , 1)
@torch.no_grad()
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0.5 , __lowerCAmelCase=0.5):
"""simple docstring"""
lowerCAmelCase = self.vision_model(__lowerCAmelCase)[0]
lowerCAmelCase = self.p_head(__lowerCAmelCase)
lowerCAmelCase = nsfw_detected.flatten()
lowerCAmelCase = nsfw_detected > p_threshold
lowerCAmelCase = nsfw_detected.tolist()
if any(__lowerCAmelCase):
logger.warning(
"""Potential NSFW content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""")
for idx, nsfw_detected_ in enumerate(__lowerCAmelCase):
if nsfw_detected_:
lowerCAmelCase = np.zeros(images[idx].shape)
lowerCAmelCase = self.w_head(__lowerCAmelCase)
lowerCAmelCase = watermark_detected.flatten()
lowerCAmelCase = watermark_detected > w_threshold
lowerCAmelCase = watermark_detected.tolist()
if any(__lowerCAmelCase):
logger.warning(
"""Potential watermarked content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""")
for idx, watermark_detected_ in enumerate(__lowerCAmelCase):
if watermark_detected_:
lowerCAmelCase = np.zeros(images[idx].shape)
return images, nsfw_detected, watermark_detected
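# The flag-then-blackout logic of the forward pass above, reduced to its
# essentials (pure numpy, illustrative).
import numpy as np

def blackout_flagged(images, scores, threshold=0.5):
    flagged = [float(s) > threshold for s in scores]
    cleaned = [np.zeros_like(img) if f else img for img, f in zip(images, flagged)]
    return cleaned, flagged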
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_instructblip'''] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 1 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = ['''pixel_values''']
def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = 8 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase)
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_pad
lowerCAmelCase = pad_size
    def rescale( self , image , scale , data_format = None , **kwargs):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def pad( self , image , size , data_format = None):
        """simple docstring"""
        old_height , old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_pad if do_pad is not None else self.do_pad
lowerCAmelCase = pad_size if pad_size is not None else self.pad_size
lowerCAmelCase = make_list_of_images(__lowerCAmelCase)
if not valid_images(__lowerCAmelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(__lowerCAmelCase) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase) for image in images]
if do_pad:
lowerCAmelCase = [self.pad(__lowerCAmelCase , size=__lowerCAmelCase) for image in images]
lowerCAmelCase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase) for image in images]
lowerCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase)
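# The pad method above in isolation: symmetric-pad a 2-D array up to the next
# multiple of `size`. Note it mirrors the source formula, which adds a full
# block even when the side is already aligned.
import numpy as np

def pad_to_multiple(image: np.ndarray, size: int = 8) -> np.ndarray:
    h, w = image.shape[:2]
    pad_h = (h // size + 1) * size - h
    pad_w = (w // size + 1) * size - w
    return np.pad(image, ((0, pad_h), (0, pad_w)), mode="symmetric")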
| 272 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (3, 32, 128)
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
lowerCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1))
return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
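# The save/reload round trip these tests exercise, sketched against the real
# processor class from the imports above (the checkpoint name is an assumption):
import tempfile
from transformers import MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
with tempfile.TemporaryDirectory() as tmp:
    processor.save_pretrained(tmp)
    reloaded = MgpstrProcessor.from_pretrained(tmp)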
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=1000 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = range_bbox
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
# convert bbox to numpy since TF does not support item assignment
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase = bbox[i, j, 3]
lowerCAmelCase = bbox[i, j, 1]
lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase = bbox[i, j, 2]
lowerCAmelCase = bbox[i, j, 0]
lowerCAmelCase = t
lowerCAmelCase = tf.convert_to_tensor(__lowerCAmelCase)
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
lowerCAmelCase = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMModel(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForMaskedLM(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFLayoutLMForSequenceClassification(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFLayoutLMForTokenClassification(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def a_ ( self):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class a__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ : str = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Tuple = 1_0
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFLayoutLMModel.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
@unittest.skip("""Onnx compliancy broke with TF 2.10""")
def a_ ( self):
"""simple docstring"""
pass
def prepare_layoutlm_batch_inputs() -> tuple:
    '''simple docstring'''
lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowerCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class a__( unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMModel.from_pretrained("""microsoft/layoutlm-base-uncased""")
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
# test the sequence output on [0, :3, :3]
lowerCAmelCase = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1E-3))
# test the pooled output on [1, :3]
lowerCAmelCase = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , __lowerCAmelCase , atol=1E-3))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=2)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(
input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=tf.convert_to_tensor([1, 1]) , )
# test whether we get a loss as a scalar
lowerCAmelCase = outputs.loss
lowerCAmelCase = (2,)
self.assertEqual(loss.shape , __lowerCAmelCase)
# test the shape of the logits
lowerCAmelCase = outputs.logits
lowerCAmelCase = (2, 2)
self.assertEqual(logits.shape , __lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=13)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(
input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
# test the shape of the logits
lowerCAmelCase = outputs.logits
lowerCAmelCase = tf.convert_to_tensor((2, 25, 13))
self.assertEqual(logits.shape , __lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained("""microsoft/layoutlm-base-uncased""")
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
lowerCAmelCase = model(input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
# test the shape of the logits
lowerCAmelCase = tf.convert_to_tensor((2, 25))
self.assertEqual(outputs.start_logits.shape , __lowerCAmelCase)
self.assertEqual(outputs.end_logits.shape , __lowerCAmelCase)
| 272 | '''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = XLMRobertaTokenizer
UpperCAmelCase_ : int = XLMRobertaTokenizerFast
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Optional[int] = True
def a_ ( self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """<pad>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(__lowerCAmelCase) , 1002)
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
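    # Note on the offsets above: XLM-R token ids are the underlying SentencePiece ids
    # shifted by tokenizer.fairseq_offset (= 1), reserving ids 0-3 for <s>, <pad>, </s>
    # and <unk>; that is why the unknown pieces round-trip to id 2 + 1 = 3 in the checks.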
def a_ ( self):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
                # Checks it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
                # Checks it saves the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
                # Checks that it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
@cached_property
def a_ ( self):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""")
def a_ ( self):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCAmelCase , f.name)
lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase)
lowerCAmelCase = pickle.dumps(__lowerCAmelCase)
pickle.loads(__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCAmelCase = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 272 | 1 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=2 , __lowerCAmelCase=8 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=16 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=36 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_config()
lowerCAmelCase = 300
return config
def a_ ( self):
"""simple docstring"""
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = MraModel(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = MraModel(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , )
lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , )
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = MraForMaskedLM(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = MraForQuestionAnswering(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = MraForSequenceClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = MraForTokenClassification(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.num_choices
lowerCAmelCase = MraForMultipleChoice(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Any = ()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = MraModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = MraModel.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
@unittest.skip(reason="""MRA does not output attentions""")
def a_ ( self):
"""simple docstring"""
return
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = MraModel.from_pretrained("""uw-madison/mra-base-512-4""")
lowerCAmelCase = torch.arange(256).unsqueeze(0)
with torch.no_grad():
lowerCAmelCase = model(__lowerCAmelCase)[0]
lowerCAmelCase = torch.Size((1, 256, 768))
self.assertEqual(output.shape , __lowerCAmelCase)
lowerCAmelCase = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""")
lowerCAmelCase = torch.arange(256).unsqueeze(0)
with torch.no_grad():
lowerCAmelCase = model(__lowerCAmelCase)[0]
lowerCAmelCase = 50265
lowerCAmelCase = torch.Size((1, 256, vocab_size))
self.assertEqual(output.shape , __lowerCAmelCase)
lowerCAmelCase = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""")
lowerCAmelCase = torch.arange(4096).unsqueeze(0)
with torch.no_grad():
lowerCAmelCase = model(__lowerCAmelCase)[0]
lowerCAmelCase = 50265
lowerCAmelCase = torch.Size((1, 4096, vocab_size))
self.assertEqual(output.shape , __lowerCAmelCase)
lowerCAmelCase = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4))
| 272 | '''simple docstring'''
def snake_case__ ( _A: int , _A: int ) -> int:
'''simple docstring'''
while a != 0:
lowerCAmelCase , lowerCAmelCase = b % a, a
return b
def snake_case__ ( _A: int , _A: int ) -> int:
'''simple docstring'''
if gcd(_A , _A ) != 1:
lowerCAmelCase = f"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(_A )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1, 0, a
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0, 1, m
while va != 0:
lowerCAmelCase = ua // va
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
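# A de-obfuscated sketch of the two helpers above (`gcd` matches the call site in the
# original; `mod_inverse` is an assumed name, since the source obscures it):
def gcd(a: int, b: int) -> int:
    # Euclid's algorithm: gcd(a, b) == gcd(b % a, a).
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    # Extended Euclid: returns x with (a * x) % m == 1; defined only when gcd(a, m) == 1.
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m


# Example: mod_inverse(3, 11) == 4, since 3 * 4 == 12 == 1 (mod 11).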
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=30 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=32 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=10 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=None , __lowerCAmelCase=2 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = scope
lowerCAmelCase = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = num_patches + 2
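        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 2 = 227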
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def a_ ( self):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = TFDeiTModel(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = TFDeiTForMaskedImageModeling(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = TFDeiTForMaskedImageModeling(__lowerCAmelCase)
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.type_sequence_label_size
lowerCAmelCase = TFDeiTForImageClassification(__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = TFDeiTForImageClassification(__lowerCAmelCase)
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowerCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ : Any = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Optional[int] = False
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFDeiTModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""")
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__lowerCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Dense))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__lowerCAmelCase)
lowerCAmelCase = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False):
"""simple docstring"""
lowerCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase)
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a_ ( self):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFDeiTModel.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
def snake_case__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a_ ( self):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""")
if is_vision_available()
else None
)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""")
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""tf""")
# forward pass
lowerCAmelCase = model(**__lowerCAmelCase)
# verify the logits
lowerCAmelCase = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , __lowerCAmelCase)
lowerCAmelCase = tf.constant([-1.0266, 0.1912, -1.2861])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4))
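        # Note: the WithTeacher head averages the class-token and distillation-token
        # logits at inference, so the slice above reflects that combined prediction.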
| 272 | '''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def snake_case__ ( _A: jnp.ndarray , _A: int , _A: float = 1 , _A: float = 1 , _A: float = 1.0e4 , _A: bool = False , _A: float = 1.0 , ) -> jnp.ndarray:
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
lowerCAmelCase = float(embedding_dim // 2 )
lowerCAmelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowerCAmelCase = min_timescale * jnp.exp(jnp.arange(_A , dtype=jnp.floataa ) * -log_timescale_increment )
lowerCAmelCase = jnp.expand_dims(_A , 1 ) * jnp.expand_dims(_A , 0 )
# scale embeddings
lowerCAmelCase = scale * emb
if flip_sin_to_cos:
lowerCAmelCase = jnp.concatenate([jnp.cos(_A ), jnp.sin(_A )] , axis=1 )
else:
lowerCAmelCase = jnp.concatenate([jnp.sin(_A ), jnp.cos(_A )] , axis=1 )
lowerCAmelCase = jnp.reshape(_A , [jnp.shape(_A )[0], embedding_dim] )
return signal
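# Shape sketch for the helper above (its public name, `get_sinusoidal_embeddings`, is
# visible at the call site below): timesteps of shape (4,) with embedding_dim=8 yield a
# (4, 8) array laid out as [sin | cos] by default, or [cos | sin] when flip_sin_to_cos=True.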
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : int = 3_2
UpperCAmelCase_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""")(__lowerCAmelCase)
lowerCAmelCase = nn.silu(__lowerCAmelCase)
lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""")(__lowerCAmelCase)
return temb
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : int = 3_2
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : float = 1
@nn.compact
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
return get_sinusoidal_embeddings(
__lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
| 272 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0.0 , __lowerCAmelCase = None , __lowerCAmelCase = "geglu" , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = True , __lowerCAmelCase = "layer_norm" , __lowerCAmelCase = False , ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = only_cross_attention
lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.")
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCAmelCase = AdaLayerNorm(__lowerCAmelCase , __lowerCAmelCase)
elif self.use_ada_layer_norm_zero:
lowerCAmelCase = AdaLayerNormZero(__lowerCAmelCase , __lowerCAmelCase)
else:
lowerCAmelCase = nn.LayerNorm(__lowerCAmelCase , elementwise_affine=__lowerCAmelCase)
lowerCAmelCase = Attention(
query_dim=__lowerCAmelCase , heads=__lowerCAmelCase , dim_head=__lowerCAmelCase , dropout=__lowerCAmelCase , bias=__lowerCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__lowerCAmelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
lowerCAmelCase = (
AdaLayerNorm(__lowerCAmelCase , __lowerCAmelCase)
if self.use_ada_layer_norm
else nn.LayerNorm(__lowerCAmelCase , elementwise_affine=__lowerCAmelCase)
)
lowerCAmelCase = Attention(
query_dim=__lowerCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__lowerCAmelCase , dim_head=__lowerCAmelCase , dropout=__lowerCAmelCase , bias=__lowerCAmelCase , upcast_attention=__lowerCAmelCase , ) # is self-attn if encoder_hidden_states is none
else:
lowerCAmelCase = None
lowerCAmelCase = None
# 3. Feed-forward
lowerCAmelCase = nn.LayerNorm(__lowerCAmelCase , elementwise_affine=__lowerCAmelCase)
lowerCAmelCase = FeedForward(__lowerCAmelCase , dropout=__lowerCAmelCase , activation_fn=__lowerCAmelCase , final_dropout=__lowerCAmelCase)
# let chunk size default to None
lowerCAmelCase = None
lowerCAmelCase = 0
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = chunk_size
lowerCAmelCase = dim
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
if self.use_ada_layer_norm:
lowerCAmelCase = self.norma(__lowerCAmelCase , __lowerCAmelCase)
elif self.use_ada_layer_norm_zero:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.norma(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hidden_dtype=hidden_states.dtype)
else:
lowerCAmelCase = self.norma(__lowerCAmelCase)
lowerCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCAmelCase = self.attna(
__lowerCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
if self.use_ada_layer_norm_zero:
lowerCAmelCase = gate_msa.unsqueeze(1) * attn_output
lowerCAmelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCAmelCase = (
self.norma(__lowerCAmelCase , __lowerCAmelCase) if self.use_ada_layer_norm else self.norma(__lowerCAmelCase)
)
lowerCAmelCase = self.attna(
__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = attn_output + hidden_states
# 3. Feed-forward
lowerCAmelCase = self.norma(__lowerCAmelCase)
if self.use_ada_layer_norm_zero:
lowerCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.")
lowerCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCAmelCase = torch.cat(
[self.ff(__lowerCAmelCase) for hid_slice in norm_hidden_states.chunk(__lowerCAmelCase , dim=self._chunk_dim)] , dim=self._chunk_dim , )
else:
lowerCAmelCase = self.ff(__lowerCAmelCase)
if self.use_ada_layer_norm_zero:
lowerCAmelCase = gate_mlp.unsqueeze(1) * ff_output
lowerCAmelCase = ff_output + hidden_states
return hidden_states
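# Worked example for the chunked feed-forward above: with a norm output of sequence
# length 1024, _chunk_size=256 and _chunk_dim=1, the tensor is split into 1024 // 256 = 4
# chunks, the FF runs once per chunk, and the pieces are concatenated back, so only a
# quarter of the FF intermediate activation is alive at any time.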
class a__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 4 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = "geglu" , __lowerCAmelCase = False , ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = int(dim * mult)
lowerCAmelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCAmelCase = GELU(__lowerCAmelCase , __lowerCAmelCase)
if activation_fn == "gelu-approximate":
lowerCAmelCase = GELU(__lowerCAmelCase , __lowerCAmelCase , approximate="""tanh""")
elif activation_fn == "geglu":
lowerCAmelCase = GEGLU(__lowerCAmelCase , __lowerCAmelCase)
elif activation_fn == "geglu-approximate":
lowerCAmelCase = ApproximateGELU(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = nn.ModuleList([])
# project in
self.net.append(__lowerCAmelCase)
# project dropout
self.net.append(nn.Dropout(__lowerCAmelCase))
# project out
self.net.append(nn.Linear(__lowerCAmelCase , __lowerCAmelCase))
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(__lowerCAmelCase))
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
for module in self.net:
lowerCAmelCase = module(__lowerCAmelCase)
return hidden_states
class a__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = "none"):
"""simple docstring"""
super().__init__()
lowerCAmelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = approximate
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(__lowerCAmelCase , approximate=self.approximate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa) , approximate=self.approximate).to(dtype=gate.dtype)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.proj(__lowerCAmelCase)
lowerCAmelCase = self.gelu(__lowerCAmelCase)
return hidden_states
class a__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
super().__init__()
lowerCAmelCase = nn.Linear(__lowerCAmelCase , dim_out * 2)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(__lowerCAmelCase)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa)).to(dtype=gate.dtype)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.proj(__lowerCAmelCase).chunk(2 , dim=-1)
return hidden_states * self.gelu(__lowerCAmelCase)
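# GEGLU (from "GLU Variants Improve Transformer", Shazeer 2020): the projection doubles
# the width, the result is chunked into value and gate halves, and the output is
# value * gelu(gate).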
class a__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
super().__init__()
lowerCAmelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.proj(__lowerCAmelCase)
return x * torch.sigmoid(1.702 * x)
class a__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
super().__init__()
lowerCAmelCase = nn.Embedding(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = nn.SiLU()
lowerCAmelCase = nn.Linear(__lowerCAmelCase , embedding_dim * 2)
lowerCAmelCase = nn.LayerNorm(__lowerCAmelCase , elementwise_affine=__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.linear(self.silu(self.emb(__lowerCAmelCase)))
lowerCAmelCase , lowerCAmelCase = torch.chunk(__lowerCAmelCase , 2)
lowerCAmelCase = self.norm(__lowerCAmelCase) * (1 + scale) + shift
return x
class a__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
super().__init__()
lowerCAmelCase = CombinedTimestepLabelEmbeddings(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = nn.SiLU()
lowerCAmelCase = nn.Linear(__lowerCAmelCase , 6 * embedding_dim , bias=__lowerCAmelCase)
lowerCAmelCase = nn.LayerNorm(__lowerCAmelCase , elementwise_affine=__lowerCAmelCase , eps=1E-6)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None):
"""simple docstring"""
lowerCAmelCase = self.linear(self.silu(self.emb(__lowerCAmelCase , __lowerCAmelCase , hidden_dtype=__lowerCAmelCase)))
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = emb.chunk(6 , dim=1)
lowerCAmelCase = self.norm(__lowerCAmelCase) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 1E-5):
"""simple docstring"""
super().__init__()
lowerCAmelCase = num_groups
lowerCAmelCase = eps
if act_fn is None:
lowerCAmelCase = None
else:
lowerCAmelCase = get_activation(__lowerCAmelCase)
lowerCAmelCase = nn.Linear(__lowerCAmelCase , out_dim * 2)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
if self.act:
lowerCAmelCase = self.act(__lowerCAmelCase)
lowerCAmelCase = self.linear(__lowerCAmelCase)
lowerCAmelCase = emb[:, :, None, None]
lowerCAmelCase , lowerCAmelCase = emb.chunk(2 , dim=1)
lowerCAmelCase = F.group_norm(__lowerCAmelCase , self.num_groups , eps=self.eps)
lowerCAmelCase = x * (1 + scale) + shift
return x
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
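# A minimal sketch of the lazy-import pattern used above (illustrative only: the real
# `transformers.utils._LazyModule` also caches submodules, fills __dir__, and wraps
# failed imports in a descriptive error; `_LazyModuleSketch` is a hypothetical name):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    """Defers importing heavy submodules until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import only the submodule that actually defines the requested attribute.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")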
| 272 | 1 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
__lowercase = '''us-east-1''' # default region
@dataclass
class a__:
'''simple docstring'''
UpperCAmelCase_ : str
UpperCAmelCase_ : Optional[Any] = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
UpperCAmelCase_ : Union[str, Any] = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
UpperCAmelCase_ : Optional[int] = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def a_ ( self):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def a_ ( self):
"""simple docstring"""
return f"{self.framework}-transfromers-test"
@property
def a_ ( self):
"""simple docstring"""
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def a_ ( self):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def snake_case__ ( _A: List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
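# Note: in the upstream conftest this fixture assigns the environment to
# `request.cls.env` so the test classes can read it as `self.env` (stated as an
# assumption; the assignment target is obscured here).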
| 272 | '''simple docstring'''
from math import sqrt
def snake_case__ ( _A: int = 1000000 ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_A , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
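# Background for the solution above (Project Euler 86): the shortest surface path across
# an a x b x c cuboid with c the longest side unfolds to sqrt((a + b) ** 2 + c ** 2), so
# for each candidate c and each s = a + b the inner expression counts the (a, b) pairs
# with 1 <= a <= b <= c, namely min(c, s // 2) - max(1, s - c) + 1. For the default limit
# of 1,000,000 the function returns 1818, the accepted answer to Problem 86.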
| 272 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = MvpTokenizer
UpperCAmelCase_ : Optional[Any] = MvpTokenizerFast
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = filter_roberta_detectors
def a_ ( self):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(__lowerCAmelCase))
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
# Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so its mean over the length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
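# A minimal sketch of the deferred-import pattern used by the module above.
# This is NOT transformers' actual `_LazyModule`; it only illustrates the idea
# with a module-level `__getattr__` (PEP 562). `_demo_import_structure` is a
# hypothetical name chosen for this example.
import importlib

_demo_import_structure = {"json": ["dumps", "loads"]} # submodule -> exported names

def __getattr__(name): # called only when `name` is not found at module level
    for submodule, exports in _demo_import_structure.items():
        if name in exports:
            return getattr(importlib.import_module(submodule), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")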
| 272 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def snake_case__ ( ) -> Union[str, Any]:
'''simple docstring'''
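# Builds a tiny 2 -> 4 linear model plus an AdamW optimizer, a one-cycle LR
# scheduler and two toy dataloaders; the Accelerator tests below all start here.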
lowerCAmelCase = torch.nn.Linear(2 , 4 )
lowerCAmelCase = torch.optim.AdamW(model.parameters() , lr=1.0 )
lowerCAmelCase = torch.optim.lr_scheduler.OneCycleLR(_A , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
lowerCAmelCase = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
lowerCAmelCase = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def snake_case__ ( _A: Any ) -> Any:
'''simple docstring'''
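# Collapses all weights into a single scalar fingerprint so the tests can
# cheaply check whether a save/load roundtrip restored the exact parameters.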
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def snake_case__ ( _A: str ) -> Any:
'''simple docstring'''
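# Overwrites the model with freshly initialized weights of the same shape,
# guaranteeing the fingerprint above changes before a checkpoint is reloaded.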
lowerCAmelCase = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_A )
class a__( lowerCAmelCase__ ):
'''simple docstring'''
@require_cuda
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__lowerCAmelCase):
lowerCAmelCase = Accelerator(cpu=__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = Accelerator()
lowerCAmelCase = GradientState()
assert state.num_steps == 1
lowerCAmelCase = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowerCAmelCase = False
assert state.sync_gradients is False
GradientState._reset_state()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = Accelerator()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = create_components()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = Accelerator()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def a_ ( self):
"""simple docstring"""
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__lowerCAmelCase , **__lowerCAmelCase):
pass
with patch("""torch.cuda.set_device""" , __lowerCAmelCase), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64"""):
lowerCAmelCase = Accelerator()
self.assertEqual(str(accelerator.state.device) , """cuda:64""")
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = Accelerator()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = get_signature(__lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase)
# make sure random weights don't match
load_random_weights(__lowerCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase)) > 1E-3)
# make sure loaded weights match
accelerator.load_state(__lowerCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase)) < 1E-3)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = Accelerator()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = get_signature(__lowerCAmelCase)
# saving hook
def save_config(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = {"""class_name""": models[0].__class__.__name__}
with open(os.path.join(__lowerCAmelCase , """data.json""") , """w""") as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
# loading hook
def load_config(__lowerCAmelCase , __lowerCAmelCase):
with open(os.path.join(__lowerCAmelCase , """data.json""") , """r""") as f:
lowerCAmelCase = json.load(__lowerCAmelCase)
lowerCAmelCase = config["""class_name"""]
lowerCAmelCase = accelerator.register_save_state_pre_hook(__lowerCAmelCase)
lowerCAmelCase = accelerator.register_load_state_pre_hook(__lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase)
# make sure random weights don't match with hooks
load_random_weights(__lowerCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase)) > 1E-3)
# random class name to verify correct one is loaded
lowerCAmelCase = """random"""
# make sure loaded weights match with hooks
accelerator.load_state(__lowerCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase)) < 1E-3)
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase)
# make sure random weights don't match with hooks removed
load_random_weights(__lowerCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase)) > 1E-3)
# random class name to verify correct one is loaded
lowerCAmelCase = """random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(__lowerCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase)) < 1E-3)
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = Accelerator()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = create_components()
lowerCAmelCase = None
# This should work
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
self.assertTrue(dummy_obj is None)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = Accelerator()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = create_components()
lowerCAmelCase = [1, 2, 3]
# This should work
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(
getattr(__lowerCAmelCase , """_is_accelerate_prepared""" , __lowerCAmelCase) , __lowerCAmelCase , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(__lowerCAmelCase , """_is_accelerate_prepared""" , __lowerCAmelCase) , __lowerCAmelCase , """Model is missing `_is_accelerate_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__lowerCAmelCase , """_is_accelerate_prepared""" , __lowerCAmelCase) , __lowerCAmelCase , """Optimizer is missing `_is_accelerate_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__lowerCAmelCase , """_is_accelerate_prepared""" , __lowerCAmelCase) , __lowerCAmelCase , """Scheduler is missing `_is_accelerate_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__lowerCAmelCase , """_is_accelerate_prepared""" , __lowerCAmelCase) , __lowerCAmelCase , """Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__lowerCAmelCase , """_is_accelerate_prepared""" , __lowerCAmelCase) , __lowerCAmelCase , """Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`""" , )
@slow
@require_bnb
def a_ ( self):
"""simple docstring"""
from transformers import AutoModelForCausalLM
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__lowerCAmelCase , device_map={"""""": 0} , )
lowerCAmelCase = Accelerator()
# This should work
lowerCAmelCase = accelerator.prepare(__lowerCAmelCase)
@slow
@require_bnb
def a_ ( self):
"""simple docstring"""
from transformers import AutoModelForCausalLM
lowerCAmelCase = Accelerator()
with init_empty_weights():
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
lowerCAmelCase = infer_auto_device_map(__lowerCAmelCase)
lowerCAmelCase = """cpu"""
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=__lowerCAmelCase , load_in_abit=__lowerCAmelCase , llm_inta_enable_fpaa_cpu_offload=__lowerCAmelCase)
# This should not work and get value error
with self.assertRaises(__lowerCAmelCase):
lowerCAmelCase = accelerator.prepare(__lowerCAmelCase)
@slow
@require_bnb
@require_multi_gpu
def a_ ( self):
"""simple docstring"""
from transformers import AutoModelForCausalLM
lowerCAmelCase = {"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
lowerCAmelCase = infer_auto_device_map(__lowerCAmelCase)
lowerCAmelCase = 1
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__lowerCAmelCase , device_map=__lowerCAmelCase , )
lowerCAmelCase = Accelerator()
# This should not work and get value error
with self.assertRaises(__lowerCAmelCase):
lowerCAmelCase = accelerator.prepare(__lowerCAmelCase)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def a_ ( self):
"""simple docstring"""
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
lowerCAmelCase = infer_auto_device_map(__lowerCAmelCase)
lowerCAmelCase = 1
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__lowerCAmelCase , device_map=__lowerCAmelCase , )
lowerCAmelCase = Accelerator()
# This should work
lowerCAmelCase = accelerator.prepare(__lowerCAmelCase)
@require_cuda
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = torch.nn.Linear(10 , 10)
lowerCAmelCase = torch.optim.SGD(model.parameters() , lr=0.01)
lowerCAmelCase = Accelerator(cpu=__lowerCAmelCase)
lowerCAmelCase = accelerator.prepare(__lowerCAmelCase)
| 272 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean
lowerCAmelCase = image_std
def a_ ( self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = DPTImageProcessingTester(self)
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean"""))
self.assertTrue(hasattr(__lowerCAmelCase , """image_std"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """size"""))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 272 | 1 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 272 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def snake_case__ ( _A: Union[str, Any] , _A: Tuple , _A: Any=1e-12 ) -> str:
'''simple docstring'''
lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
return jnp.matmul(_A , norm_emb_a.T )
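# Hedged sketch: despite its name, the helper above returns cosine
# similarities (higher means closer), i.e. row-wise L2 normalization of both
# embedding matrices followed by a matrix product. A NumPy equivalent for
# intuition only (`_np_cosine_sim` is a hypothetical name, not part of this
# module):
import numpy as np

def _np_cosine_sim(a, b, eps=1e-12):
    a = a / np.clip(np.linalg.norm(a, axis=1, keepdims=True), eps, None)
    b = b / np.clip(np.linalg.norm(b, axis=1, keepdims=True), eps, None)
    return a @ b.T # entry (i, j) compares image embedding i with concept j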
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : CLIPConfig
UpperCAmelCase_ : jnp.dtype = jnp.floataa
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config)
lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype)
lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim))
lowerCAmelCase = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim))
lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,))
lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,))
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1]
lowerCAmelCase = self.visual_projection(__lowerCAmelCase)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowerCAmelCase = 0.0
lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase)
# Use a lower threshold if an image has any special care concept
lowerCAmelCase = is_special_care * 0.01
lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
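# Illustrative arithmetic for the scoring rule above (numbers are made up):
# a concept with cosine similarity 0.82 against a learned weight of 0.80
# scores 0.82 - 0.80 + 0.0 = 0.02 > 0 and flags the image; if any
# special-care concept fired first, the extra 0.01 adjustment lowers the
# effective threshold for every concept.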
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = CLIPConfig
UpperCAmelCase_ : Any = '''clip_input'''
UpperCAmelCase_ : List[str] = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
if input_shape is None:
lowerCAmelCase = (1, 224, 224, 3)
lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase)
super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase)
lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng}
lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""]
return random_params
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1))
return self.module.apply(
{"""params""": params or self.params} , jnp.array(__lowerCAmelCase , dtype=jnp.floataa) , rngs={} , )
| 272 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__lowercase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def snake_case__ ( _A: Optional[int] , _A: List[str] , _A: Dict , _A: Union[str, Any] , _A: Any ) -> Any:
'''simple docstring'''
for attribute in key.split(""".""" ):
lowerCAmelCase = getattr(_A , _A )
if weight_type is not None:
lowerCAmelCase = getattr(_A , _A ).shape
else:
lowerCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def snake_case__ ( _A: Optional[Any] , _A: Union[str, Any] ) -> str:
'''simple docstring'''
lowerCAmelCase = []
lowerCAmelCase = fairseq_model.state_dict()
lowerCAmelCase = hf_model.feature_extractor
# if the encoder dim differs from the decoder dim -> use proj_weight
lowerCAmelCase = None
for name, value in fairseq_dict.items():
lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == """group""" , )
lowerCAmelCase = True
elif name.split(""".""" )[0] == "proj":
lowerCAmelCase = fairseq_model.proj
lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(_A )[0].split(""".""" )[-2]
lowerCAmelCase = mapped_key.replace("""*""" , _A )
if "weight_g" in name:
lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
lowerCAmelCase = """weight_v"""
elif "bias" in name:
lowerCAmelCase = """bias"""
elif "weight" in name:
lowerCAmelCase = """weight"""
else:
lowerCAmelCase = None
set_recursively(_A , _A , _A , _A , _A )
continue
if not is_used:
unused_weights.append(_A )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
def snake_case__ ( _A: Optional[Any] , _A: List[str] , _A: Union[str, Any] , _A: Any , _A: List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
lowerCAmelCase = name.split(""".""" )
lowerCAmelCase = int(items[0] )
lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
lowerCAmelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
lowerCAmelCase = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
lowerCAmelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
lowerCAmelCase = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_A )
def snake_case__ ( _A: Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = emb.weight.shape
lowerCAmelCase = nn.Linear(_A , _A , bias=_A )
lowerCAmelCase = emb.weight.data
return lin_layer
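# Hedged note on the helper above: it reuses the embedding matrix as a
# bias-free output projection, mapping a hidden state of embedding size to
# vocabulary-size logits with weights tied to the input embedding, a standard
# weight-tying trick in LM conversion scripts.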
def snake_case__ ( _A: List[Any] ) -> List[str]:
'''simple docstring'''
with open(_A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase = f.readlines()
lowerCAmelCase = [line.split(""" """ )[0] for line in lines]
lowerCAmelCase = len(_A )
lowerCAmelCase = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(_A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
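# Worked example for the vocab builder above (hypothetical dict file): a
# fairseq dict.txt with the two lines "hello 42" and "world 7" yields
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5};
# the four specials are pinned first and the corpus counts are discarded.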
@torch.no_grad()
def snake_case__ ( _A: List[str] , _A: List[str] , _A: List[str] , _A: Tuple , _A: Optional[int] , _A: Optional[int] , _A: Optional[int] , ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase = WavaVecaConfig.from_pretrained(_A )
lowerCAmelCase = SpeechaTextaConfig.from_pretrained(
_A , vocab_size=_A , decoder_layers=_A , do_stable_layer_norm=_A )
lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
lowerCAmelCase = model[0].eval()
# set weights for wav2vec2 encoder
lowerCAmelCase = WavaVecaModel(_A )
lowerCAmelCase = recursively_load_weights_wavaveca(model.encoder , _A )
lowerCAmelCase = SpeechaTextaForCausalLM(_A )
lowerCAmelCase , lowerCAmelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_A )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
lowerCAmelCase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
lowerCAmelCase = SpeechEncoderDecoderModel(encoder=_A , decoder=_A )
lowerCAmelCase = False
# add projection layer
lowerCAmelCase = nn.Parameter(projection_layer.weight )
lowerCAmelCase = nn.Parameter(projection_layer.bias )
lowerCAmelCase = create_vocab_dict(_A )
with open(os.path.join(_A , """vocab.json""" ) , """w""" ) as fp:
json.dump(_A , _A )
lowerCAmelCase = SpeechaTextaTokenizer(os.path.join(_A , """vocab.json""" ) )
tokenizer.save_pretrained(_A )
lowerCAmelCase = hf_wavavec.config.to_dict()
lowerCAmelCase = tokenizer.pad_token_id
lowerCAmelCase = tokenizer.bos_token_id
lowerCAmelCase = tokenizer.eos_token_id
lowerCAmelCase = """speech_to_text_2"""
lowerCAmelCase = """wav2vec2"""
lowerCAmelCase = SpeechEncoderDecoderConfig.from_dict(_A )
hf_wavavec.save_pretrained(_A )
feature_extractor.save_pretrained(_A )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0_2_2_4, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__lowercase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 272 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = MvpTokenizer
UpperCAmelCase_ : Optional[Any] = MvpTokenizerFast
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = filter_roberta_detectors
def a_ ( self):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(__lowerCAmelCase))
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
# Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so its mean over the length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class a__( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None):
"""simple docstring"""
super().__init__()
lowerCAmelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
lowerCAmelCase = torch.zeros(__lowerCAmelCase , __lowerCAmelCase)
else:
lowerCAmelCase = None
lowerCAmelCase = torch.nn.Parameter(__lowerCAmelCase)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : VQModel
UpperCAmelCase_ : CLIPTextModel
UpperCAmelCase_ : CLIPTokenizer
UpperCAmelCase_ : TransformeraDModel
UpperCAmelCase_ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase_ : VQDiffusionScheduler
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=__lowerCAmelCase , transformer=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , scheduler=__lowerCAmelCase , learned_classifier_free_sampling_embeddings=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = len(__lowerCAmelCase) if isinstance(__lowerCAmelCase , __lowerCAmelCase) else 1
# get prompt text embeddings
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowerCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f" {self.tokenizer.model_max_length} tokens: {removed_text}")
lowerCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase = self.text_encoder(text_input_ids.to(self.device))[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
lowerCAmelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__lowerCAmelCase)
# duplicate text embeddings for each generation per prompt
lowerCAmelCase = prompt_embeds.repeat_interleave(__lowerCAmelCase , dim=0)
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
lowerCAmelCase = self.learned_classifier_free_sampling_embeddings.embeddings
lowerCAmelCase = negative_prompt_embeds.unsqueeze(0).repeat(__lowerCAmelCase , 1 , 1)
else:
lowerCAmelCase = [""""""] * batch_size
lowerCAmelCase = text_input_ids.shape[-1]
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding="""max_length""" , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" , )
lowerCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# See comment for normalizing text embeddings
lowerCAmelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__lowerCAmelCase)
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase = negative_prompt_embeds.shape[1]
lowerCAmelCase = negative_prompt_embeds.repeat(1 , __lowerCAmelCase , 1)
lowerCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __lowerCAmelCase , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
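# The concatenation above is what enables classifier-free guidance with a
# single forward pass: the sampling loop below chunks the batch back into
# (uncond, cond) halves and combines them as
# uncond + guidance_scale * (cond - uncond).
# Illustrative arithmetic: with guidance_scale = 5.0, uncond = 0.2 and
# cond = 0.4, the guided output is 0.2 + 5.0 * (0.4 - 0.2) = 1.2.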
@torch.no_grad()
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = 100 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = 1 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = 1 , ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = 1
elif isinstance(__lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = len(__lowerCAmelCase)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__lowerCAmelCase)}")
lowerCAmelCase = batch_size * num_images_per_prompt
lowerCAmelCase = guidance_scale > 1.0
lowerCAmelCase = self._encode_prompt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__lowerCAmelCase)}.")
# get the initial completely masked latents unless the user supplied it
lowerCAmelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
lowerCAmelCase = self.transformer.num_vector_embeds - 1
lowerCAmelCase = torch.full(__lowerCAmelCase , __lowerCAmelCase).to(self.device)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f" {self.transformer.num_vector_embeds - 1} (inclusive).")
lowerCAmelCase = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(__lowerCAmelCase , device=self.device)
lowerCAmelCase = self.scheduler.timesteps.to(self.device)
lowerCAmelCase = latents
for i, t in enumerate(self.progress_bar(__lowerCAmelCase)):
# expand the sample if we are doing classifier free guidance
lowerCAmelCase = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
lowerCAmelCase = self.transformer(__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , timestep=__lowerCAmelCase).sample
if do_classifier_free_guidance:
lowerCAmelCase , lowerCAmelCase = model_output.chunk(2)
lowerCAmelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__lowerCAmelCase , dim=1 , keepdim=__lowerCAmelCase)
lowerCAmelCase = self.truncate(__lowerCAmelCase , __lowerCAmelCase)
# remove `log(0)`'s (`-inf`s)
lowerCAmelCase = model_output.clamp(-70)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(__lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , generator=__lowerCAmelCase).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.vqvae.config.vq_embed_dim
lowerCAmelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
lowerCAmelCase = self.vqvae.quantize.get_codebook_entry(__lowerCAmelCase , shape=__lowerCAmelCase)
lowerCAmelCase = self.vqvae.decode(__lowerCAmelCase , force_not_quantize=__lowerCAmelCase).sample
lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1)
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
lowerCAmelCase = self.numpy_to_pil(__lowerCAmelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = torch.sort(__lowerCAmelCase , 1 , descending=__lowerCAmelCase)
lowerCAmelCase = torch.exp(__lowerCAmelCase)
lowerCAmelCase = sorted_p_x_0.cumsum(dim=1) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
lowerCAmelCase = torch.full_like(keep_mask[:, 0:1, :] , __lowerCAmelCase)
lowerCAmelCase = torch.cat((all_true, keep_mask) , dim=1)
lowerCAmelCase = keep_mask[:, :-1, :]
lowerCAmelCase = keep_mask.gather(1 , indices.argsort(1))
lowerCAmelCase = log_p_x_0.clone()
lowerCAmelCase = -torch.inf # -inf = log(0)
return rv
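# Worked example for `truncate` above (hypothetical probabilities): with
# sorted per-pixel probs (0.5, 0.3, 0.15, 0.05) and truncation_rate = 0.9,
# the cumulative sums are (0.5, 0.8, 0.95, 1.0). After the one-position
# shift that always keeps the most likely token, the first three tokens
# survive and the 0.05 tail is set to log(0) = -inf, i.e. a top-p style cutoff.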
| 272 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class a__( enum.Enum ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : Any = 2
@add_end_docstrings(lowerCAmelCase__ )
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase = None
if self.model.config.prefix is not None:
lowerCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params)
lowerCAmelCase = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase = {**self._forward_params, **forward_params}
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = {}
if prefix is not None:
lowerCAmelCase = prefix
if prefix:
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
""" [None, 'hole']""")
lowerCAmelCase = handle_long_generation
preprocess_params.update(__lowerCAmelCase)
lowerCAmelCase = generate_kwargs
lowerCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
if len(__lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
lowerCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase)
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
            # Remove the padded prompt from the sequence when an XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
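# A minimal usage sketch for the pipeline defined above (hedged: assumes the
# standard `transformers.pipeline` factory and an illustrative model id):
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="gpt2")
#   result = generator("Once upon a time,", max_new_tokens=20)
#   print(result[0]["generated_text"])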
| 272 | 1 |
'''simple docstring'''
import qiskit
def snake_case__ ( _A: int , _A: int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
lowerCAmelCase = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
lowerCAmelCase = qiskit.QuantumCircuit(_A , _A )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
lowerCAmelCase = qiskit.execute(_A , _A , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_A )
if __name__ == "__main__":
__lowercase = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}')
| 272 | '''simple docstring'''
def snake_case__ ( _A: str ) -> list[int]:
'''simple docstring'''
lowerCAmelCase = [0 for i in range(len(_A ) )]
# initialize interval's left pointer and right pointer
lowerCAmelCase , lowerCAmelCase = 0, 0
for i in range(1 , len(_A ) ):
# case when current index is inside the interval
if i <= right_pointer:
lowerCAmelCase = min(right_pointer - i + 1 , z_result[i - left_pointer] )
lowerCAmelCase = min_edge
while go_next(_A , _A , _A ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
lowerCAmelCase , lowerCAmelCase = i, i + z_result[i] - 1
return z_result
def snake_case__ ( _A: int , _A: list[int] , _A: str ) -> bool:
'''simple docstring'''
return i + z_result[i] < len(_A ) and s[z_result[i]] == s[i + z_result[i]]
def snake_case__ ( _A: str , _A: str ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
lowerCAmelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_A ):
answer += 1
return answer
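# Quick reference for the algorithm above (hedged: uses the original function
# names `z_function` and `find_pattern`, which the bodies already reference):
#
#   z_function("aaaa")             -> [0, 3, 2, 1]
#   find_pattern("aba", "abacaba") -> 2   # "aba" occurs at offsets 0 and 4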
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
__lowercase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def snake_case__ ( _A: str , _A: str ) -> str:
'''simple docstring'''
lowerCAmelCase = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
lowerCAmelCase = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCAmelCase = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=_A , output_all_encodings=_A , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , _A ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase = os.path.join(get_home_dir() , """models""" )
lowerCAmelCase = _load_vocab(_A , _A , _A , cls=_A )
lowerCAmelCase = nlp.model.BERTModel(
_A , len(_A ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=_A , use_token_type_embed=_A , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=_A , use_decoder=_A , )
original_bort.load_parameters(_A , cast_dtype=_A , ignore_extra=_A )
lowerCAmelCase = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowerCAmelCase = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(_A ),
}
lowerCAmelCase = BertConfig.from_dict(_A )
lowerCAmelCase = BertForMaskedLM(_A )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(_A: int ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(_A: Tuple , _A: str ):
lowerCAmelCase = hf_param.shape
lowerCAmelCase = to_torch(params[gluon_param] )
lowerCAmelCase = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
lowerCAmelCase = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" )
lowerCAmelCase = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" )
lowerCAmelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" )
lowerCAmelCase = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCAmelCase = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCAmelCase = hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCAmelCase = layer.attention.self
lowerCAmelCase = check_and_map_params(
self_attn.key.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
lowerCAmelCase = check_and_map_params(
self_attn.key.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
lowerCAmelCase = check_and_map_params(
self_attn.query.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
lowerCAmelCase = check_and_map_params(
self_attn.query.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
lowerCAmelCase = check_and_map_params(
self_attn.value.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
lowerCAmelCase = check_and_map_params(
self_attn.value.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
lowerCAmelCase = layer.attention.output
lowerCAmelCase = check_and_map_params(
self_output.dense.bias , f"encoder.transformer_cells.{i}.proj.bias" )
lowerCAmelCase = check_and_map_params(
self_output.dense.weight , f"encoder.transformer_cells.{i}.proj.weight" )
lowerCAmelCase = check_and_map_params(
self_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.layer_norm.beta" )
lowerCAmelCase = check_and_map_params(
self_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
lowerCAmelCase = layer.intermediate
lowerCAmelCase = check_and_map_params(
intermediate.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
lowerCAmelCase = check_and_map_params(
intermediate.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
lowerCAmelCase = layer.output
lowerCAmelCase = check_and_map_params(
bert_output.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
lowerCAmelCase = check_and_map_params(
bert_output.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
lowerCAmelCase = check_and_map_params(
bert_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
lowerCAmelCase = check_and_map_params(
bert_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowerCAmelCase = RobertaTokenizer.from_pretrained("""roberta-base""" )
lowerCAmelCase = tokenizer.encode_plus(_A )["""input_ids"""]
# Get gluon output
lowerCAmelCase = mx.nd.array([input_ids] )
lowerCAmelCase = original_bort(inputs=_A , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_A )
lowerCAmelCase = BertModel.from_pretrained(_A )
hf_bort_model.eval()
lowerCAmelCase = tokenizer.encode_plus(_A , return_tensors="""pt""" )
lowerCAmelCase = hf_bort_model(**_A )[0]
lowerCAmelCase = output_gluon[0].asnumpy()
lowerCAmelCase = output_hf[0].detach().numpy()
lowerCAmelCase = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCAmelCase = np.allclose(_A , _A , atol=1e-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , _A )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
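# Example invocation (hedged: the script filename and both paths are
# placeholders; the two flags are the required arguments declared above):
#
#   python convert_bort_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path /path/to/output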
| 272 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : str = '''EncodecFeatureExtractor'''
UpperCAmelCase_ : Dict = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.feature_extractor
lowerCAmelCase = False
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase)
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if text is not None:
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase)
if audio is not None:
lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
lowerCAmelCase = audio_inputs["""padding_mask"""]
return inputs
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase)
else:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = to_numpy(__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape
if padding_mask is None:
return list(__lowerCAmelCase)
lowerCAmelCase = to_numpy(__lowerCAmelCase)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase = seq_len - padding_mask.shape[-1]
lowerCAmelCase = 1 - self.feature_extractor.padding_value
lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase)
lowerCAmelCase = audio_values.tolist()
for i in range(__lowerCAmelCase):
lowerCAmelCase = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1)
return audio_values
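def _demo_masked_slice():
    """A minimal, self-contained sketch of the boolean-mask slicing used in
    `_decode_audio` above (hedged: assumes a mono clip and padding_value == 0)."""
    audio = np.arange(6, dtype=np.float32).reshape(1, 6)  # (channels=1, seq_len)
    mask = np.array([1, 1, 1, 1, 0, 0])                   # last two frames are padding
    sliced = audio[mask[None, :] != 0].reshape(1, -1)     # keep only non-padded frames
    assert sliced.shape == (1, 4)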
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( _A: int ) -> list[int]:
'''simple docstring'''
lowerCAmelCase = [True] * limit
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowerCAmelCase = i * 2
while index < limit:
lowerCAmelCase = False
lowerCAmelCase = index + i
lowerCAmelCase = [2]
for i in range(3 , _A , 2 ):
if is_prime[i]:
primes.append(_A )
return primes
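# Sanity check for the sieve above (hedged: assumes the original name
# `prime_sieve`, which `solution` below also calls):
#
#   prime_sieve(10) -> [2, 3, 5, 7]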
def snake_case__ ( _A: int = 1000000 ) -> int:
'''simple docstring'''
lowerCAmelCase = prime_sieve(_A )
lowerCAmelCase = 0
lowerCAmelCase = 0
for i in range(len(_A ) ):
for j in range(i + length , len(_A ) ):
lowerCAmelCase = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCAmelCase = j - i
lowerCAmelCase = sol
return largest
if __name__ == "__main__":
print(f'{solution() = }')
| 272 | '''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__( unittest.TestCase ):
'''simple docstring'''
@property
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.dummy_uncond_unet
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """google/ddpm-cifar10-32"""
lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 272 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def snake_case__ ( _A: Optional[int] , _A: Union[str, Any] , _A: Dict , _A: Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = s.rsplit(_A , _A )
return new.join(_A )
def snake_case__ ( _A: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def snake_case__ ( _A: str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase = {}
lowerCAmelCase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCAmelCase = key.replace(f"{group_key}." , f"{group_key}.group." )
if "res_path" in key:
lowerCAmelCase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowerCAmelCase = rreplace(_A , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowerCAmelCase = rreplace(_A , """.b""" , """.bias""" , 1 )
lowerCAmelCase = value.float()
return upgrade
@torch.no_grad()
def snake_case__ ( _A: Dict , _A: List[str] , _A: str=None , _A: Union[str, Any]=True ) -> Any:
'''simple docstring'''
from dall_e import Encoder
lowerCAmelCase = Encoder()
if os.path.exists(_A ):
lowerCAmelCase = torch.load(_A )
else:
lowerCAmelCase = torch.hub.load_state_dict_from_url(_A )
if isinstance(_A , _A ):
lowerCAmelCase = ckpt.state_dict()
encoder.load_state_dict(_A )
if config_path is not None:
lowerCAmelCase = FlavaImageCodebookConfig.from_pretrained(_A )
else:
lowerCAmelCase = FlavaImageCodebookConfig()
lowerCAmelCase = FlavaImageCodebook(_A ).eval()
lowerCAmelCase = encoder.state_dict()
lowerCAmelCase = upgrade_state_dict(_A )
hf_model.load_state_dict(_A )
lowerCAmelCase = hf_model.state_dict()
lowerCAmelCase = count_parameters(_A )
lowerCAmelCase = count_parameters(_A )
assert torch.allclose(_A , _A , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_A )
else:
return hf_state_dict
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__lowercase = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 272 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def snake_case__ ( _A: str ) -> str:
'''simple docstring'''
if not sentence:
return ""
    lowerCAmelCase = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 272 | 1 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowercase = logging.getLogger(__name__)
def snake_case__ ( _A: torch.nn.Module , _A: BnbQuantizationConfig , _A: Union[str, os.PathLike] = None , _A: Optional[Dict[str, Union[int, str, torch.device]]] = None , _A: Optional[List[str]] = None , _A: Optional[Dict[Union[int, str], Union[int, str]]] = None , _A: Optional[Union[str, os.PathLike]] = None , _A: bool = False , ) -> Dict:
'''simple docstring'''
lowerCAmelCase = bnb_quantization_config.load_in_abit
lowerCAmelCase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
lowerCAmelCase = []
# custom device map
if isinstance(_A , _A ) and len(device_map.keys() ) > 1:
lowerCAmelCase = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase = get_keys_to_not_convert(_A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_A )
lowerCAmelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase = []
lowerCAmelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_A )
# compatibility with peft
lowerCAmelCase = load_in_abit
lowerCAmelCase = load_in_abit
lowerCAmelCase = get_parameter_device(_A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
lowerCAmelCase = replace_with_bnb_layers(_A , _A , modules_to_not_convert=_A )
# convert param to the right dtype
lowerCAmelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
lowerCAmelCase = getattr(_A , _A , _A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_A ):
param.to(_A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f"The model device type is {model_device.type}. However, cuda is needed for quantization."
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
lowerCAmelCase = replace_with_bnb_layers(
_A , _A , modules_to_not_convert=_A )
lowerCAmelCase = get_quantized_model_device_map(
_A , _A , _A , max_memory=_A , no_split_module_classes=_A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase = True
lowerCAmelCase = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_A , _A , _A , dtype=bnb_quantization_config.torch_dtype , offload_folder=_A , offload_state_dict=_A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_A , device_map=_A , offload_dir=_A )
def snake_case__ ( _A: Union[str, Any] , _A: List[str] , _A: Optional[int]=None , _A: Optional[int]=None , _A: Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_A , _A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
lowerCAmelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase = {}
lowerCAmelCase = special_dtypes
lowerCAmelCase = no_split_module_classes
lowerCAmelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase = get_balanced_memory(
_A , low_zero=(device_map == """balanced_low_0""") , max_memory=_A , **_A , )
lowerCAmelCase = max_memory
lowerCAmelCase = infer_auto_device_map(_A , **_A )
if isinstance(_A , _A ):
# check if don't have any quantized module on the cpu
lowerCAmelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def snake_case__ ( _A: Tuple , _A: int , _A: List[Any]=None , _A: Union[str, Any]=None ) -> Optional[int]:
'''simple docstring'''
if modules_to_not_convert is None:
lowerCAmelCase = []
lowerCAmelCase , lowerCAmelCase = _replace_with_bnb_layers(
_A , _A , _A , _A )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def snake_case__ ( _A: Optional[Any] , _A: Dict , _A: List[str]=None , _A: Optional[int]=None , ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase = []
current_key_name.append(_A )
if isinstance(_A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase = """.""".join(_A )
lowerCAmelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
lowerCAmelCase = module.weight.data
if module.bias is not None:
lowerCAmelCase = module.bias.data
bnb_module.requires_grad_(_A )
setattr(_A , _A , _A )
lowerCAmelCase = True
if len(list(module.children() ) ) > 0:
lowerCAmelCase , lowerCAmelCase = _replace_with_bnb_layers(
_A , _A , _A , _A )
lowerCAmelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def snake_case__ ( _A: Optional[Any] ) -> int:
'''simple docstring'''
with init_empty_weights():
lowerCAmelCase = deepcopy(_A ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase = find_tied_parameters(_A )
# For compatibility with Accelerate < 0.18
if isinstance(_A , _A ):
lowerCAmelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase = sum(_A , [] )
lowerCAmelCase = len(_A ) > 0
# Check if it is a base model
lowerCAmelCase = False
if hasattr(_A , """base_model_prefix""" ):
lowerCAmelCase = not hasattr(_A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase = list(model.named_children() )
lowerCAmelCase = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase = set(_A ) - set(_A )
lowerCAmelCase = list(set(_A ) ) + list(_A )
# remove ".weight" from the keys
lowerCAmelCase = [""".weight""", """.bias"""]
lowerCAmelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase = name.replace(_A , """""" )
filtered_module_names.append(_A )
return filtered_module_names
def snake_case__ ( _A: Union[str, Any] ) -> Any:
'''simple docstring'''
for m in model.modules():
if isinstance(_A , bnb.nn.Linearabit ):
return True
return False
def snake_case__ ( _A: nn.Module ) -> Optional[int]:
'''simple docstring'''
return next(parameter.parameters() ).device
def snake_case__ ( _A: int , _A: Tuple , _A: Any , _A: Any , _A: Any , _A: int , _A: Optional[int] ) -> List[str]:
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(_A , _A , 0 , dtype=_A , value=_A )
lowerCAmelCase = param_name
lowerCAmelCase = model
if "." in tensor_name:
lowerCAmelCase = tensor_name.split(""".""" )
for split in splits[:-1]:
lowerCAmelCase = getattr(_A , _A )
if new_module is None:
raise ValueError(f"{module} has no attribute {split}." )
lowerCAmelCase = new_module
lowerCAmelCase = splits[-1]
# offload weights
lowerCAmelCase = False
offload_weight(module._parameters[tensor_name] , _A , _A , index=_A )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _A , index=_A , )
else:
offload_weight(_A , _A , _A , index=_A )
offload_weight(_A , param_name.replace("""weight""" , """SCB""" ) , _A , index=_A )
set_module_tensor_to_device(_A , _A , """meta""" , dtype=_A , value=torch.empty(*param.size() ) )
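# A minimal usage sketch (hedged: `BnbQuantizationConfig` and
# `load_and_quantize_model` are the public entry points this module backs in
# `accelerate.utils`; the model class and checkpoint path are placeholders):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel(config)
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="/path/to/checkpoint"
#   )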
| 272 | '''simple docstring'''
import os
import string
import sys
__lowercase = 1 << 8
__lowercase = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
__lowercase = KEYMAP['''up''']
__lowercase = KEYMAP['''left''']
if sys.platform == "win32":
__lowercase = []
__lowercase = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
__lowercase = ord(str(i))
def snake_case__ ( ) -> List[Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_A ) == 0:
# Read the keystroke
lowerCAmelCase = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(_A )
if ord(_A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase = cha[1]
else:
lowerCAmelCase = ch.decode(_A )
else:
lowerCAmelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase = sys.stdin.fileno()
lowerCAmelCase = termios.tcgetattr(_A )
try:
tty.setraw(_A )
lowerCAmelCase = sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def snake_case__ ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase = get_raw_chars()
if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_A ) == KEYMAP["esc"]:
lowerCAmelCase = get_raw_chars()
if ord(_A ) == KEYMAP["mod_int"]:
lowerCAmelCase = get_raw_chars()
if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
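# Self-contained sketch of the arrow-key encoding above: arrow codes carry an
# extra flag bit (1 << 8), so they can never collide with single-byte characters.
def _demo_arrow_flag():
    flag = 1 << 8
    assert (65 + flag) & flag      # an encoded arrow keeps the flag bit set
    assert not (ord("a") & flag)   # ordinary printable characters never do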
| 272 | 1 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class a__( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCAmelCase = 768 , ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = nn.Parameter(torch.zeros(1 , __lowerCAmelCase))
lowerCAmelCase = nn.Parameter(torch.ones(1 , __lowerCAmelCase))
def a_ ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = nn.Parameter(self.mean.to(__lowerCAmelCase).to(__lowerCAmelCase))
lowerCAmelCase = nn.Parameter(self.std.to(__lowerCAmelCase).to(__lowerCAmelCase))
return self
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = (embeds - self.mean) * 1.0 / self.std
return embeds
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = (embeds * self.std) + self.mean
return embeds
| 272 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ['''input_features''']
def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16000 , __lowerCAmelCase=160 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = n_fft
lowerCAmelCase = hop_length
lowerCAmelCase = chunk_length
lowerCAmelCase = chunk_length * sampling_rate
lowerCAmelCase = self.n_samples // hop_length
lowerCAmelCase = sampling_rate
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , )
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0)
lowerCAmelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0):
"""simple docstring"""
if attention_mask is not None:
lowerCAmelCase = np.array(__lowerCAmelCase , np.intaa)
lowerCAmelCase = []
for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1)):
lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
if length < normed_slice.shape[0]:
lowerCAmelCase = padding_value
normed_input_values.append(__lowerCAmelCase)
else:
lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
return normed_input_values
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
lowerCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray):
lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa)
elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
lowerCAmelCase = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech]).T]
lowerCAmelCase = BatchFeature({"""input_features""": raw_speech})
# convert into correct format for padding
lowerCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0)
# make sure list is in array format
lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
lowerCAmelCase = [self._np_extract_fbank_features(__lowerCAmelCase) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCAmelCase):
lowerCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase)
return padded_inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__)
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
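# The default geometry implied by `__init__` above: 30-second chunks sampled at
# 16 kHz with a hop of 160 samples yield 3000 mel frames per chunk.
assert 30 * 16000 // 160 == 3000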
| 272 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class a__( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = '''swin'''
UpperCAmelCase_ : Optional[Any] = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __lowerCAmelCase=224 , __lowerCAmelCase=4 , __lowerCAmelCase=3 , __lowerCAmelCase=96 , __lowerCAmelCase=[2, 2, 6, 2] , __lowerCAmelCase=[3, 6, 12, 24] , __lowerCAmelCase=7 , __lowerCAmelCase=4.0 , __lowerCAmelCase=True , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase="gelu" , __lowerCAmelCase=False , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-5 , __lowerCAmelCase=32 , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase)
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = len(__lowerCAmelCase)
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
lowerCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase = int(embed_dim * 2 ** (len(__lowerCAmelCase) - 1))
lowerCAmelCase = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__lowerCAmelCase) + 1)]
lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = version.parse('''1.11''' )
@property
def a_ ( self):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def a_ ( self):
"""simple docstring"""
return 1E-4
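# Self-contained check of the channel-dimension rule noted in `__init__` above:
# the embedding dimension doubles at every stage after the first.
_embed_dim, _depths = 96, [2, 2, 6, 2]
assert int(_embed_dim * 2 ** (len(_depths) - 1)) == 768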
| 272 | '''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowercase = logging.get_logger(__name__)
__lowercase = '''T5Config'''
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''mt5'''
UpperCAmelCase_ : Tuple = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = '''mt5'''
UpperCAmelCase_ : int = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''mt5'''
UpperCAmelCase_ : Union[str, Any] = MTaConfig
| 272 | 1 |
'''simple docstring'''
from math import pi
def snake_case__ ( _A: int , _A: int ) -> float:
'''simple docstring'''
return 2 * pi * radius * (angle / 360)
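# Worked example matching the demo below: a 90-degree arc of a radius-10 circle
# has length 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.708.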
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 272 | '''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''ernie_m'''
UpperCAmelCase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowerCAmelCase = 250002 , __lowerCAmelCase = 768 , __lowerCAmelCase = 12 , __lowerCAmelCase = 12 , __lowerCAmelCase = 3072 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 514 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-0_5 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = classifier_dropout
lowerCAmelCase = is_decoder
lowerCAmelCase = act_dropout
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def snake_case__ ( _A: int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowercase = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def snake_case__ ( _A: int ) -> list[int]:
'''simple docstring'''
if not isinstance(_A , _A ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
lowerCAmelCase = []
for num in range(len(_A ) ):
lowerCAmelCase = 0
while 2 * i * i <= odd_composites[num]:
lowerCAmelCase = odd_composites[num] - 2 * i * i
if is_prime(_A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(_A ) == n:
return list_nums
return []
def snake_case__ ( ) -> int:
'''simple docstring'''
return compute_nums(1 )[0]
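# For reference (hedged: quoted from memory of Project Euler 46): the first odd
# composite with no p + 2*i**2 decomposition found this way is 5777.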
if __name__ == "__main__":
print(f'{solution() = }')
| 272 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowercase = logging.getLogger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Any = '''sequence-classification'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
if type(__lowerCAmelCase) == dict:
lowerCAmelCase = Namespace(**__lowerCAmelCase)
lowerCAmelCase = glue_output_modes[hparams.task]
lowerCAmelCase = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return self.model(**__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase = outputs[0]
lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""]
lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.hparams
lowerCAmelCase = processors[args.task]()
lowerCAmelCase = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowerCAmelCase = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowerCAmelCase = convert_examples_to_features(
__lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __lowerCAmelCase)
torch.save(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False):
"""simple docstring"""
lowerCAmelCase = """dev""" if mode == """test""" else mode
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
lowerCAmelCase = torch.load(__lowerCAmelCase)
lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = outputs[:2]
lowerCAmelCase = logits.detach().cpu().numpy()
lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = np.argmax(__lowerCAmelCase , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = np.squeeze(__lowerCAmelCase)
lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)}
lowerCAmelCase = dict(results.items())
lowerCAmelCase = results
return ret, preds_list, out_label_list
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def a_ ( __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase)
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def snake_case__ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser()
add_generic_args(_A , os.getcwd() )
lowerCAmelCase = GLUETransformer.add_model_specific_args(_A , os.getcwd() )
lowerCAmelCase = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCAmelCase = os.path.join(
"""./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
lowerCAmelCase = GLUETransformer(_A )
lowerCAmelCase = generic_train(_A , _A )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=_A ) )
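        # Take the last entry of the sorted glob, assumed to be the most recent epoch checkpoint.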
lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_A )
if __name__ == "__main__":
main()
| 272 | 1 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
__lowercase = '''
import os
'''
__lowercase = '''
def foo():
import os
return False
'''
__lowercase = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
__lowercase = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
__lowercase = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
__lowercase = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
__lowercase = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
__lowercase = '''
import os
try:
import bar
except:
raise ValueError()
'''
__lowercase = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
__lowercase = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
__lowercase = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
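# Every case above should resolve to the single top-level dependency "os":
# imports guarded inside try/except blocks are treated as optional and skipped.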
@pytest.mark.parametrize("""case""" , _A )
def snake_case__ ( _A: Union[str, Any] , _A: Tuple ) -> Any:
'''simple docstring'''
lowerCAmelCase = os.path.join(_A , """test_file.py""" )
with open(_A , """w""" ) as _tmp_file:
_tmp_file.write(_A )
lowerCAmelCase = get_imports(_A )
assert parsed_imports == ["os"]
| 272 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=[1, 1, 2] , __lowerCAmelCase=1 , __lowerCAmelCase=32 , __lowerCAmelCase=4 , __lowerCAmelCase=8 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu_new" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=512 , __lowerCAmelCase=3 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=False , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = block_sizes
lowerCAmelCase = num_decoder_layers
lowerCAmelCase = d_model
lowerCAmelCase = n_head
lowerCAmelCase = d_head
lowerCAmelCase = d_inner
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = 2
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase = self.num_hidden_layers + 2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
lowerCAmelCase = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelModel(config=__lowerCAmelCase)
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase = model(__lowerCAmelCase)
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowerCAmelCase = False
lowerCAmelCase = TFFunnelModel(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowerCAmelCase = False
lowerCAmelCase = TFFunnelModel(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelBaseModel(config=__lowerCAmelCase)
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase = model(__lowerCAmelCase)
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowerCAmelCase = False
lowerCAmelCase = TFFunnelBaseModel(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowerCAmelCase = False
lowerCAmelCase = TFFunnelBaseModel(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelForPreTraining(config=__lowerCAmelCase)
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelForMaskedLM(config=__lowerCAmelCase)
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFFunnelForSequenceClassification(config=__lowerCAmelCase)
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = self.num_choices
lowerCAmelCase = TFFunnelForMultipleChoice(config=__lowerCAmelCase)
lowerCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowerCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowerCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFFunnelForTokenClassification(config=__lowerCAmelCase)
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = TFFunnelForQuestionAnswering(config=__lowerCAmelCase)
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs

lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class a__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ : List[Any] = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Union[str, Any] = False
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFFunnelModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase)
@require_tf
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : int = False
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFFunnelModelTester(self , base=__lowerCAmelCase)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase)
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
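# The modeling classes depend on torch; they are added to the lazy import
# structure only when torch is available.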
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( _A: list , _A: int ) -> Dict:
'''simple docstring'''
if len(_A ) <= 1 or n <= 1:
return
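    # Fix any inversion starting at index n - 1, then recursively sort the shorter prefix.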
insert_next(_A , n - 1 )
rec_insertion_sort(_A , n - 1 )
def snake_case__ ( _A: list , _A: int ) -> List[str]:
'''simple docstring'''
if index >= len(_A ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
lowerCAmelCase , lowerCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(_A , index + 1 )
if __name__ == "__main__":
__lowercase = input('''Enter integers separated by spaces: ''')
__lowercase = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 272 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (3, 32, 128)
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
lowerCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1))
return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 272 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = '''mobilenet_v2'''
def __init__( self , __lowerCAmelCase=3 , __lowerCAmelCase=224 , __lowerCAmelCase=1.0 , __lowerCAmelCase=8 , __lowerCAmelCase=8 , __lowerCAmelCase=6 , __lowerCAmelCase=32 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="relu6" , __lowerCAmelCase=True , __lowerCAmelCase=0.8 , __lowerCAmelCase=0.02 , __lowerCAmelCase=0.001 , __lowerCAmelCase=255 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase)
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""")
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = depth_multiplier
lowerCAmelCase = depth_divisible_by
lowerCAmelCase = min_depth
lowerCAmelCase = expand_ratio
lowerCAmelCase = output_stride
lowerCAmelCase = first_layer_is_expansion
lowerCAmelCase = finegrained_output
lowerCAmelCase = hidden_act
lowerCAmelCase = tf_padding
lowerCAmelCase = classifier_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = semantic_loss_ignore_index
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = version.parse('''1.11''' )
@property
def a_ ( self):
"""simple docstring"""
return OrderedDict([("""pixel_values""", {0: """batch"""})])
@property
def a_ ( self):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})])
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})])
@property
def a_ ( self):
"""simple docstring"""
return 1E-4
| 272 | '''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = XLMRobertaTokenizer
UpperCAmelCase_ : int = XLMRobertaTokenizerFast
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Optional[int] = True
def a_ ( self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """<pad>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(__lowerCAmelCase) , 1002)
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
@cached_property
def a_ ( self):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""")
def a_ ( self):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCAmelCase , f.name)
lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase)
lowerCAmelCase = pickle.dumps(__lowerCAmelCase)
pickle.loads(__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCAmelCase = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 272 | 1 |
'''simple docstring'''
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = name
lowerCAmelCase = val
def __str__( self):
"""simple docstring"""
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self , __lowerCAmelCase):
"""simple docstring"""
return self.val < other.val
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {}
lowerCAmelCase = {}
lowerCAmelCase = self.build_heap(__lowerCAmelCase)
def __getitem__( self , __lowerCAmelCase):
"""simple docstring"""
return self.get_value(__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return (idx - 1) // 2
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return idx * 2 + 1
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return idx * 2 + 2
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return self.heap_dict[key]
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = len(__lowerCAmelCase) - 1
lowerCAmelCase = self.get_parent_idx(__lowerCAmelCase)
for idx, i in enumerate(__lowerCAmelCase):
lowerCAmelCase = idx
lowerCAmelCase = i.val
for i in range(__lowerCAmelCase , -1 , -1):
self.sift_down(__lowerCAmelCase , __lowerCAmelCase)
return array
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
while True:
lowerCAmelCase = self.get_left_child_idx(__lowerCAmelCase) # noqa: E741
lowerCAmelCase = self.get_right_child_idx(__lowerCAmelCase)
lowerCAmelCase = idx
if l < len(__lowerCAmelCase) and array[l] < array[idx]:
lowerCAmelCase = l
if r < len(__lowerCAmelCase) and array[r] < array[smallest]:
lowerCAmelCase = r
if smallest != idx:
lowerCAmelCase , lowerCAmelCase = array[smallest], array[idx]
                lowerCAmelCase , lowerCAmelCase = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
lowerCAmelCase = smallest
else:
break
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.get_parent_idx(__lowerCAmelCase)
while p >= 0 and self.heap[p] > self.heap[idx]:
lowerCAmelCase , lowerCAmelCase = self.heap[idx], self.heap[p]
lowerCAmelCase , lowerCAmelCase = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowerCAmelCase = p
lowerCAmelCase = self.get_parent_idx(__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
return self.heap[0]
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.heap[-1], self.heap[0]
lowerCAmelCase , lowerCAmelCase = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowerCAmelCase = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap)
return x
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
self.heap.append(__lowerCAmelCase)
lowerCAmelCase = len(self.heap) - 1
lowerCAmelCase = node.val
self.sift_up(len(self.heap) - 1)
def a_ ( self):
"""simple docstring"""
return len(self.heap) == 0
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowerCAmelCase = new_value
lowerCAmelCase = new_value
self.sift_up(self.idx_of_element[node])
__lowercase = Node('''R''', -1)
__lowercase = Node('''B''', 6)
__lowercase = Node('''A''', 3)
__lowercase = Node('''X''', 1)
__lowercase = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__lowercase = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | '''simple docstring'''
def snake_case__ ( _A: int , _A: int ) -> int:
'''simple docstring'''
while a != 0:
lowerCAmelCase , lowerCAmelCase = b % a, a
return b
def snake_case__ ( _A: int , _A: int ) -> int:
'''simple docstring'''
if gcd(_A , _A ) != 1:
lowerCAmelCase = f"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(_A )
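    # Iterative extended Euclidean algorithm: the coefficient triples are updated
    # until the first coefficient, taken modulo m, is the inverse of a.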
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1, 0, a
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0, 1, m
while va != 0:
lowerCAmelCase = ua // va
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
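# Example: the modular inverse of 3 modulo 11 is 4, since (3 * 4) % 11 == 1.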
| 272 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = StableDiffusionInstructPixaPixPipeline
UpperCAmelCase_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
UpperCAmelCase_ : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase_ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
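        # InstructPix2Pix conditions the UNet on the input image, so it takes 8 input
        # channels: 4 noise latents concatenated with 4 encoded image latents.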
lowerCAmelCase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase)
torch.manual_seed(0)
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase = CLIPTextModel(__lowerCAmelCase)
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=0):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase)).to(__lowerCAmelCase)
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
lowerCAmelCase = Image.fromarray(np.uinta(__lowerCAmelCase)).convert("""RGB""")
if str(__lowerCAmelCase).startswith("""mps"""):
lowerCAmelCase = torch.manual_seed(__lowerCAmelCase)
else:
lowerCAmelCase = torch.Generator(device=__lowerCAmelCase).manual_seed(__lowerCAmelCase)
lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase)
lowerCAmelCase = sd_pipe.to(__lowerCAmelCase)
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = sd_pipe(**__lowerCAmelCase).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase)
lowerCAmelCase = sd_pipe.to(__lowerCAmelCase)
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = """french fries"""
lowerCAmelCase = sd_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase)
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase)
lowerCAmelCase = sd_pipe.to(__lowerCAmelCase)
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = [inputs["""prompt"""]] * 2
lowerCAmelCase = np.array(inputs["""image"""]).astype(np.floataa) / 255.0
lowerCAmelCase = torch.from_numpy(__lowerCAmelCase).unsqueeze(0).to(__lowerCAmelCase)
lowerCAmelCase = image / 2 + 0.5
lowerCAmelCase = image.permute(0 , 3 , 1 , 2)
lowerCAmelCase = image.repeat(2 , 1 , 1 , 1)
lowerCAmelCase = sd_pipe(**__lowerCAmelCase).images
lowerCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowerCAmelCase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""")
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase)
lowerCAmelCase = sd_pipe.to(__lowerCAmelCase)
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = sd_pipe(**__lowerCAmelCase).images
lowerCAmelCase = image[0, -3:, -3:, -1]
        lowerCAmelCase = [round(x , 4) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x) for x in slice]))
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def a_ ( self):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**__lowerCAmelCase)
lowerCAmelCase = VaeImageProcessor(do_resize=__lowerCAmelCase , do_normalize=__lowerCAmelCase)
lowerCAmelCase = pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = pipe(**self.get_dummy_inputs_by_type(__lowerCAmelCase , input_image_type="""pt"""))[0]
lowerCAmelCase = components["""vae"""]
lowerCAmelCase = self.get_dummy_inputs_by_type(__lowerCAmelCase , input_image_type="""pt""")
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCAmelCase = vae.encode(inputs[image_param]).latent_dist.mode()
lowerCAmelCase = pipe(**__lowerCAmelCase)[0]
lowerCAmelCase = np.abs(out - out_latents_inputs).max()
self.assertLess(__lowerCAmelCase , 1E-4 , """passing latents as image input generates a different result from passing an image""")
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self , __lowerCAmelCase=0):
"""simple docstring"""
lowerCAmelCase = torch.manual_seed(__lowerCAmelCase)
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""")
lowerCAmelCase = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase)
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**__lowerCAmelCase).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase)
lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**__lowerCAmelCase).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase)
lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**__lowerCAmelCase).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def a_ ( self):
"""simple docstring"""
number_of_steps = 0
def callback_fn(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase = latents[0, -3:, -3:, -1]
lowerCAmelCase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
elif step == 2:
lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase = latents[0, -3:, -3:, -1]
lowerCAmelCase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
callback_fn.has_been_called = False
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa)
lowerCAmelCase = pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
pipe(**__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def a_ ( self):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa)
lowerCAmelCase = pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**__lowerCAmelCase)
lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase = inputs["""image"""].resize((504, 504))
lowerCAmelCase = """timbrooks/instruct-pix2pix"""
lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing()
lowerCAmelCase = pipe(**__lowerCAmelCase)
lowerCAmelCase = output.images[0]
lowerCAmelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowerCAmelCase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
| 272 | '''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def snake_case__ ( _A: jnp.ndarray , _A: int , _A: float = 1 , _A: float = 1 , _A: float = 1.0e4 , _A: bool = False , _A: float = 1.0 , ) -> jnp.ndarray:
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
lowerCAmelCase = float(embedding_dim // 2 )
lowerCAmelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowerCAmelCase = min_timescale * jnp.exp(jnp.arange(_A , dtype=jnp.floataa ) * -log_timescale_increment )
lowerCAmelCase = jnp.expand_dims(_A , 1 ) * jnp.expand_dims(_A , 0 )
# scale embeddings
lowerCAmelCase = scale * emb
if flip_sin_to_cos:
lowerCAmelCase = jnp.concatenate([jnp.cos(_A ), jnp.sin(_A )] , axis=1 )
else:
lowerCAmelCase = jnp.concatenate([jnp.sin(_A ), jnp.cos(_A )] , axis=1 )
lowerCAmelCase = jnp.reshape(_A , [jnp.shape(_A )[0], embedding_dim] )
return signal
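# A minimal usage sketch (illustrative; assumes the obfuscated `snake_case__`
# above resolves to `get_sinusoidal_embeddings`, the name the Timesteps module
# below calls): 1d timesteps of shape (batch,) map to (batch, embedding_dim).
#
# ts = jnp.array([0, 10, 100])
# emb = get_sinusoidal_embeddings(ts, embedding_dim=32, flip_sin_to_cos=True)
# assert emb.shape == (3, 32)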
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : int = 3_2
UpperCAmelCase_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""")(__lowerCAmelCase)
lowerCAmelCase = nn.silu(__lowerCAmelCase)
lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""")(__lowerCAmelCase)
return temb
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : int = 3_2
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : float = 1
@nn.compact
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
return get_sinusoidal_embeddings(
__lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
| 272 | 1 |
'''simple docstring'''
import numpy as np
from PIL import Image
def snake_case__ ( _A: np.ndarray , _A: int , _A: int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase = np.array(_A )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
# compute the shape of the output matrix
lowerCAmelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCAmelCase = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCAmelCase = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase = 0
lowerCAmelCase = 0
return updated_arr
def snake_case__ ( _A: np.ndarray , _A: int , _A: int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase = np.array(_A )
if arr.shape[0] != arr.shape[1]:
raise ValueError("""The input array is not a square matrix""" )
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
# compute the shape of the output matrix
lowerCAmelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCAmelCase = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCAmelCase = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase = 0
lowerCAmelCase = 0
return updated_arr
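# Quick sanity check for the two pooling helpers above (values worked by hand):
# a 4x4 input with size=2, stride=2 yields a 2x2 output of block maxima/averages.
#
# arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
# maxpooling(arr, size=2, stride=2)  # -> [[ 6.,  8.], [14., 16.]]
# avgpooling(arr, size=2, stride=2)  # -> [[ 3.,  5.], [11., 13.]]
# (avgpooling truncates each block mean to int, so 3.5 -> 3, 5.5 -> 5, etc.)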
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
__lowercase = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
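# Sketch of what the lazy pattern buys (a reading of the block above, with an
# assumed usage): importing the package only registers the names listed in
# `_import_structure`; the heavy torch-backed submodule is loaded on first
# attribute access, e.g. `from transformers import NezhaModel`.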
| 272 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowercase = logging.getLogger(__name__)
def snake_case__ ( ) -> int:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=_A , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=_A , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=_A , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=_A , default=1000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=_A , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=_A , type=_A , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=_A , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=_A , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
lowerCAmelCase = parser.parse_args()
return args
def snake_case__ ( _A: Any ) -> Union[str, Any]:
'''simple docstring'''
def fn(_A: str ):
return tokenizer(examples["""text"""] )
return fn
def snake_case__ ( _A: Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
lowerCAmelCase = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
lowerCAmelCase = tf.train.Features(feature=_A )
lowerCAmelCase = tf.train.Example(features=_A )
lowerCAmelCase = example.SerializeToString()
records.append(_A )
return records
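# A sketch of the inverse operation for reading the shards back (assumed, not
# part of the original script): the feature spec mirrors the two int64 features
# serialized above.
def parse_tfrecord_example(serialized_example):
    # VarLenFeature because sequence lengths may differ before chunking
    feature_spec = {
        "input_ids": tf.io.VarLenFeature(tf.int64),
        "attention_mask": tf.io.VarLenFeature(tf.int64),
    }
    parsed = tf.io.parse_single_example(serialized_example, feature_spec)
    return {name: tf.sparse.to_dense(tensor) for name, tensor in parsed.items()}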
def snake_case__ ( _A: Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCAmelCase = min(len(_A ) , args.limit )
lowerCAmelCase = dataset.select(range(_A ) )
print(f"Limiting the dataset to {args.limit} entries." )
lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCAmelCase = os.path.join(args.output_dir , args.split )
if not os.path.exists(_A ):
os.makedirs(_A )
else:
lowerCAmelCase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCAmelCase = tokenize_function(_A )
lowerCAmelCase = dataset.map(_A , batched=_A , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_A: Dict ):
# Concatenate all texts.
lowerCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCAmelCase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowerCAmelCase = {
k: [t[i : i + args.max_length] for i in range(0 , _A , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
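# A tiny worked example of group_texts (illustrative numbers): with
# args.max_length = 4 and 10 concatenated input_ids, total_length truncates to
# (10 // 4) * 4 = 8, producing two chunks ids[0:4] and ids[4:8]; the final two
# tokens are dropped rather than padded.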
lowerCAmelCase = dataset_tokenized.map(_A , batched=_A , batch_size=1000 , num_proc=4 )
lowerCAmelCase = 0
lowerCAmelCase = 0
for shard in range(0 , len(_A ) , args.shard_size ):
lowerCAmelCase = grouped_dataset[shard : shard + args.shard_size]
lowerCAmelCase = len(dataset_snapshot["""input_ids"""] )
lowerCAmelCase = os.path.join(_A , f"dataset-{shard_count}-{records_containing}.tfrecord" )
lowerCAmelCase = get_serialized_examples(_A )
with tf.io.TFRecordWriter(_A ) as out_file:
for i in range(len(_A ) ):
lowerCAmelCase = serialized_examples[i]
out_file.write(_A )
print("""Wrote file {} containing {} records""".format(_A , _A ) )
shard_count += 1
total_records += records_containing
with open(f"split-{args.split}-records-count.txt" , """w""" ) as f:
print(f"Total {args.split} records: {total_records}" , file=_A )
if __name__ == "__main__":
__lowercase = parse_args()
main(args)
| 272 | '''simple docstring'''
from math import sqrt
def snake_case__ ( _A: int = 1000000 ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
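# Sketch of the counting step above (assumed reading of the solution): for a
# cuboid with longest side M and a + b = s over the two shorter sides, the
# shortest surface path is sqrt(s**2 + M**2); when that is an integer, the
# number of distinct (a, b) with 1 <= a <= b <= M is
#     min(M, s // 2) - max(1, s - M) + 1
# Example: M = 6, s = 8 gives sqrt(64 + 36) = 10, and the pairs (2, 6), (3, 5),
# (4, 4): min(6, 4) - max(1, 2) + 1 = 3.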
| 272 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = AudioLDMPipeline
UpperCAmelCase_ : Optional[int] = TEXT_TO_AUDIO_PARAMS
UpperCAmelCase_ : Dict = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCAmelCase_ : List[str] = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__lowerCAmelCase , )
lowerCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0)
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
lowerCAmelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
lowerCAmelCase = ClapTextModelWithProjection(__lowerCAmelCase)
lowerCAmelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77)
lowerCAmelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__lowerCAmelCase , )
lowerCAmelCase = SpeechTaHifiGan(__lowerCAmelCase)
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=0):
"""simple docstring"""
if str(__lowerCAmelCase).startswith("""mps"""):
lowerCAmelCase = torch.manual_seed(__lowerCAmelCase)
else:
lowerCAmelCase = torch.Generator(device=__lowerCAmelCase).manual_seed(__lowerCAmelCase)
lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe(**__lowerCAmelCase)
lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase) == 256
lowerCAmelCase = audio[:10]
lowerCAmelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
lowerCAmelCase = audioldm_pipe(**__lowerCAmelCase)
lowerCAmelCase = output.audios[0]
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = 3 * [inputs.pop("""prompt""")]
lowerCAmelCase = audioldm_pipe.tokenizer(
__lowerCAmelCase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowerCAmelCase , return_tensors="""pt""" , )
lowerCAmelCase = text_inputs["""input_ids"""].to(__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.text_encoder(
__lowerCAmelCase , )
lowerCAmelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCAmelCase = F.normalize(__lowerCAmelCase , dim=-1)
lowerCAmelCase = prompt_embeds
# forward
lowerCAmelCase = audioldm_pipe(**__lowerCAmelCase)
lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = 3 * ["""this is a negative prompt"""]
lowerCAmelCase = negative_prompt
lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
lowerCAmelCase = audioldm_pipe(**__lowerCAmelCase)
lowerCAmelCase = output.audios[0]
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = 3 * [inputs.pop("""prompt""")]
lowerCAmelCase = []
for p in [prompt, negative_prompt]:
lowerCAmelCase = audioldm_pipe.tokenizer(
__lowerCAmelCase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowerCAmelCase , return_tensors="""pt""" , )
lowerCAmelCase = text_inputs["""input_ids"""].to(__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.text_encoder(
__lowerCAmelCase , )
lowerCAmelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCAmelCase = F.normalize(__lowerCAmelCase , dim=-1)
embeds.append(__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = embeds
# forward
lowerCAmelCase = audioldm_pipe(**__lowerCAmelCase)
lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1E-2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase)
lowerCAmelCase = AudioLDMPipeline(**__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = """egg cracking"""
lowerCAmelCase = audioldm_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase)
lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase) == 256
lowerCAmelCase = audio[:10]
lowerCAmelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
assert np.abs(audio_slice - expected_slice).max() < 1E-2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase)
lowerCAmelCase = AudioLDMPipeline(**__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
lowerCAmelCase = audioldm_pipe(__lowerCAmelCase , num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowerCAmelCase = 2
lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowerCAmelCase = 2
lowerCAmelCase = audioldm_pipe(__lowerCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCAmelCase).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowerCAmelCase = 2
lowerCAmelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCAmelCase).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **__lowerCAmelCase)
lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase) / vocoder_sampling_rate == 0.016
lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **__lowerCAmelCase)
lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase) / vocoder_sampling_rate == 0.032
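# Arithmetic behind the asserts above: with the 16 kHz dummy vocoder,
# audio_length_in_s = 0.016 corresponds to 0.016 * 16000 = 256 samples and
# 0.032 to 512 samples.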
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = ["""hey"""]
lowerCAmelCase = audioldm_pipe(__lowerCAmelCase , num_inference_steps=1)
lowerCAmelCase = output.audios.shape
assert audio_shape == (1, 256)
lowerCAmelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowerCAmelCase = SpeechTaHifiGan(__lowerCAmelCase).to(__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe(__lowerCAmelCase , num_inference_steps=1)
lowerCAmelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def a_ ( self):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=__lowerCAmelCase)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a_ ( self):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCAmelCase)
@slow
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="cpu" , __lowerCAmelCase=torch.floataa , __lowerCAmelCase=0):
"""simple docstring"""
lowerCAmelCase = torch.Generator(device=__lowerCAmelCase).manual_seed(__lowerCAmelCase)
lowerCAmelCase = np.random.RandomState(__lowerCAmelCase).standard_normal((1, 8, 128, 16))
lowerCAmelCase = torch.from_numpy(__lowerCAmelCase).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase)
lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""")
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_inputs(__lowerCAmelCase)
lowerCAmelCase = 25
lowerCAmelCase = audioldm_pipe(**__lowerCAmelCase).audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase) == 81920
lowerCAmelCase = audio[77230:77240]
lowerCAmelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
lowerCAmelCase = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1E-2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""")
lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
lowerCAmelCase = audioldm_pipe.to(__lowerCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_inputs(__lowerCAmelCase)
lowerCAmelCase = audioldm_pipe(**__lowerCAmelCase).audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase) == 81920
lowerCAmelCase = audio[27780:27790]
lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
lowerCAmelCase = np.abs(expected_slice - audio_slice).max()
assert max_diff < 3E-2
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 272 | 1 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__lowercase = datasets.load_iris()
__lowercase = np.array(data['''data'''])
__lowercase = np.array(data['''target'''])
__lowercase = data['''target_names''']
__lowercase , __lowercase , __lowercase , __lowercase = train_test_split(X, y)
def snake_case__ ( _A: Union[str, Any] , _A: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return np.linalg.norm(np.array(_A ) - np.array(_A ) )
def snake_case__ ( _A: str , _A: Union[str, Any] , _A: List[str] , _A: Any , _A: Union[str, Any]=5 ) -> Dict:
'''simple docstring'''
lowerCAmelCase = zip(_A , _A )
# List of distances of all points from the point to be classified
lowerCAmelCase = []
for data_point in data:
lowerCAmelCase = euclidean_distance(data_point[0] , _A )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
lowerCAmelCase = [i[1] for i in sorted(_A )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
lowerCAmelCase = Counter(_A ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
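# Sketch of scoring the classifier on the held-out split (names from this file;
# not part of the original):
# preds = [classifier(X_train, y_train, classes, point) for point in X_test]
# accuracy = np.mean([pred == classes[label] for pred, label in zip(preds, y_test)])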
| 272 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean
lowerCAmelCase = image_std
def a_ ( self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = DPTImageProcessingTester(self)
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean"""))
self.assertTrue(hasattr(__lowerCAmelCase , """image_std"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """size"""))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 272 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Any = '''speech_to_text_2'''
UpperCAmelCase_ : int = ['''past_key_values''']
UpperCAmelCase_ : Union[str, Any] = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , __lowerCAmelCase=10000 , __lowerCAmelCase=6 , __lowerCAmelCase=2048 , __lowerCAmelCase=4 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=256 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=2 , __lowerCAmelCase=True , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=1024 , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
| 272 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def snake_case__ ( _A: Union[str, Any] , _A: Tuple , _A: Any=1e-12 ) -> str:
'''simple docstring'''
lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
return jnp.matmul(_A , norm_emb_a.T )
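# Reading of the helper above: both embedding matrices are row-wise
# L2-normalized (eps guards against division by zero), so the final matmul
# returns the pairwise cosine-similarity matrix of shape (rows_of_emb_1,
# rows_of_emb_2).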
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : CLIPConfig
UpperCAmelCase_ : jnp.dtype = jnp.floataa
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config)
lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype)
lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim))
lowerCAmelCase = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim))
lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,))
lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,))
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1]
lowerCAmelCase = self.visual_projection(__lowerCAmelCase)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowerCAmelCase = 0.0
lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase)
# Use a lower threshold if an image has any special care concept
lowerCAmelCase = is_special_care * 0.01
lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
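# Reading of the scoring above: each score is a cosine similarity minus a
# learned per-concept weight; `adjustment` shifts that boundary globally, and
# images matching a special-care concept get an extra 0.01, so they are flagged
# at a lower similarity. Raising either value filters more aggressively.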
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = CLIPConfig
UpperCAmelCase_ : Any = '''clip_input'''
UpperCAmelCase_ : List[str] = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
if input_shape is None:
lowerCAmelCase = (1, 224, 224, 3)
lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase)
super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase)
lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng}
lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""]
return random_params
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1))
return self.module.apply(
{"""params""": params or self.params} , jnp.array(__lowerCAmelCase , dtype=jnp.floataa) , rngs={} , )
| 272 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
__lowercase = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowercase = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowercase = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__( datasets.Metric ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Value("""string""" , id="""sequence"""),
}) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False):
"""simple docstring"""
if concatenate_texts:
return compute_measures(__lowerCAmelCase , __lowerCAmelCase)["wer"]
else:
lowerCAmelCase = 0
lowerCAmelCase = 0
for prediction, reference in zip(__lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = compute_measures(__lowerCAmelCase , __lowerCAmelCase)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
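# Worked check of the docstring example (standard minimal-edit alignment, as
# jiwer computes it):
#   pair 1: "this is the prediction" vs "this is the reference"
#           1 substitution, 3 hits                -> 1 error over N = 4
#   pair 2: "there is an other sample" vs "there is another one"
#           2 substitutions + 1 insertion, 2 hits -> 3 errors over N = 4
# overall WER = (1 + 3) / (4 + 4) = 0.5, matching the documented output.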
| 272 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = MvpTokenizer
UpperCAmelCase_ : Optional[Any] = MvpTokenizerFast
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = filter_roberta_detectors
def a_ ( self):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(__lowerCAmelCase))
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
# Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
# Rust and Python tokenizers should handle the space before the mask identically
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = CycleDiffusionPipeline
UpperCAmelCase_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
UpperCAmelCase_ : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
UpperCAmelCase_ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowerCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0)
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase = CLIPTextModel(__lowerCAmelCase)
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=0):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase)).to(__lowerCAmelCase)
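# floats_tensor samples uniformly in [0, 1); halving and shifting to [0.5, 1.0)
# keeps the dummy image comfortably inside the valid pixel range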
lowerCAmelCase = image / 2 + 0.5
if str(__lowerCAmelCase).startswith("""mps"""):
lowerCAmelCase = torch.manual_seed(__lowerCAmelCase)
else:
lowerCAmelCase = torch.Generator(device=__lowerCAmelCase).manual_seed(__lowerCAmelCase)
lowerCAmelCase = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = CycleDiffusionPipeline(**__lowerCAmelCase)
lowerCAmelCase = pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = pipe(**__lowerCAmelCase)
lowerCAmelCase = output.images
lowerCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""")
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(__lowerCAmelCase , """half"""):
lowerCAmelCase = module.half()
lowerCAmelCase = CycleDiffusionPipeline(**__lowerCAmelCase)
lowerCAmelCase = pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
lowerCAmelCase = pipe(**__lowerCAmelCase)
lowerCAmelCase = output.images
lowerCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@skip_mps
def a_ ( self):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""")
def a_ ( self):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def a_ ( self):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a_ ( self):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def a_ ( self):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""")
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""")
lowerCAmelCase = init_image.resize((512, 512))
lowerCAmelCase = """CompVis/stable-diffusion-v1-4"""
lowerCAmelCase = DDIMScheduler.from_pretrained(__lowerCAmelCase , subfolder="""scheduler""")
lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(
__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa , revision="""fp16""")
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing()
lowerCAmelCase = """A black colored car"""
lowerCAmelCase = """A blue colored car"""
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pipe(
prompt=__lowerCAmelCase , source_prompt=__lowerCAmelCase , image=__lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__lowerCAmelCase , output_type="""np""" , )
lowerCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image).max() < 5E-1
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""")
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""")
lowerCAmelCase = init_image.resize((512, 512))
lowerCAmelCase = """CompVis/stable-diffusion-v1-4"""
lowerCAmelCase = DDIMScheduler.from_pretrained(__lowerCAmelCase , subfolder="""scheduler""")
lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase)
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing()
lowerCAmelCase = """A black colored car"""
lowerCAmelCase = """A blue colored car"""
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pipe(
prompt=__lowerCAmelCase , source_prompt=__lowerCAmelCase , image=__lowerCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__lowerCAmelCase , output_type="""np""" , )
lowerCAmelCase = output.images
assert np.abs(image - expected_image).max() < 2E-2
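# Illustrative end-to-end call (a sketch mirroring the tests above; the checkpoint
# name is an example from this file, not a requirement of the pipeline class):
#   scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
#   pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
#   out = pipe(prompt="A blue colored car", source_prompt="A black colored car",
#              image=init_image, strength=0.85, guidance_scale=3, source_guidance_scale=1)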
| 272 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class a__( enum.Enum ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : Any = 2
@add_end_docstrings(lowerCAmelCase__ )
class a__( lowerCAmelCase__ ):
'''simple docstring'''
XL_PREFIX : str = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase = None
if self.model.config.prefix is not None:
lowerCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params)
lowerCAmelCase = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase = {**self._forward_params, **forward_params}
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = {}
if prefix is not None:
lowerCAmelCase = prefix
if prefix:
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
""" [None, 'hole']""")
lowerCAmelCase = handle_long_generation
preprocess_params.update(__lowerCAmelCase)
lowerCAmelCase = generate_kwargs
lowerCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
if len(__lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
lowerCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
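# The three dicts returned here are routed to preprocess(), _forward() and
# postprocess() respectively; keeping them separate lets per-call kwargs
# override the pipeline-level defaults captured at construction time.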
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase)
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
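# Worked example of the "hole" strategy above (illustrative numbers): with a
# model_max_length of 1024, a prompt of 1200 tokens and max_new_tokens=100,
# keep_length = 1024 - 100 = 924, so only the last 924 prompt tokens are kept
# and the generated continuation still fits inside the model window.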
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
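# Illustrative usage of this pipeline class (a sketch; "gpt2" is an example
# checkpoint, not mandated by this file):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"]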
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list , low: int , mid: int , high: int ) -> list:
    '''Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] in place.'''
    result = []
    left , right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    # write the merged run back into place; without this the merge is a no-op
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list ) -> list:
    '''Return a sorted copy of input_list using iterative (bottom-up) merge sort.'''
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(''',''')]
    print(iter_merge_sort(unsorted))
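# Worked example: iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) returns
# [1, 2, 5, 7, 7, 8, 9]. The pass width p doubles each outer iteration
# (2, 4, 8, ...), giving the usual O(n log n) bottom-up merge sort.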
| 272 | '''simple docstring'''
def z_function(input_str: str ) -> list[int]:
    '''For each index i, z_result[i] is the length of the longest prefix of input_str starting at i.'''
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int , z_result: list[int] , s: str ) -> bool:
    '''Check whether the prefix match starting at index i can be extended by one character.'''
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str , input_str: str ) -> int:
    '''Count occurrences of pattern in input_str via the Z-function of the concatenation.'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index marks the start of a substring equal to the pattern
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
    import doctest
    doctest.testmod()
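# Worked examples: z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
# (z[7] == 4 because the prefix "abra" recurs at index 7), and
# find_pattern("abr", "abracadabra") == 2 (matches at indices 0 and 7).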
| 272 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = self.vocab_size - 1
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
lowerCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTModel(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , head_mask=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTLMHeadModel(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTDoubleHeadsModel(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = OpenAIGPTForSequenceClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def a_ ( self):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    head_mask,
    token_type_ids,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {
    """input_ids""": input_ids,
    """token_type_ids""": token_type_ids,
    """head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class a__( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCAmelCase_ : Optional[int] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCAmelCase_ : Optional[int] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False):
"""simple docstring"""
lowerCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase)
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase , )
lowerCAmelCase = inputs_dict["""labels"""]
lowerCAmelCase = inputs_dict["""labels"""]
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCAmelCase , )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase)
return inputs_dict
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , n_embd=37)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = OpenAIGPTModel.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""")
model.to(__lowerCAmelCase)
lowerCAmelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCAmelCase) # the president is
lowerCAmelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCAmelCase = model.generate(__lowerCAmelCase , do_sample=__lowerCAmelCase)
self.assertListEqual(output_ids[0].tolist() , __lowerCAmelCase)
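# Greedy decoding (do_sample=False, which the expected ids above assume) is
# deterministic, so this integration test can pin the exact continuation ids.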
| 272 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class a__( lowerCAmelCase__ ):
'''simple docstring'''
feature_extractor_class = '''EncodecFeatureExtractor'''
tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.feature_extractor
lowerCAmelCase = False
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase)
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if text is not None:
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase)
if audio is not None:
lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
lowerCAmelCase = audio_inputs["""padding_mask"""]
return inputs
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase)
else:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = to_numpy(__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape
if padding_mask is None:
return list(__lowerCAmelCase)
lowerCAmelCase = to_numpy(__lowerCAmelCase)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase = seq_len - padding_mask.shape[-1]
lowerCAmelCase = 1 - self.feature_extractor.padding_value
lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase)
lowerCAmelCase = audio_values.tolist()
for i in range(__lowerCAmelCase):
lowerCAmelCase = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1)
return audio_values
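# Illustrative decode path (a sketch; shapes are examples): given generated
# audio_values of shape (batch, channels, seq_len) and the padding_mask the
# feature extractor produced, the decode helpers above strip the padded
# positions from each example and return a list of per-sample arrays.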
| 272 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__lowercase = sys.version_info >= (3, 1_0)
def list_field(default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BasicExample:
    '''simple docstring'''
    foo : int
    bar : float
    baz : str
    flag : bool
@dataclass
class WithDefaultExample:
    '''simple docstring'''
    foo : int = 42
    baz : str = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class WithDefaultBoolExample:
    '''simple docstring'''
    foo : bool = False
    baz : bool = True
    opt : Optional[bool] = None
class BasicEnum(Enum):
    '''simple docstring'''
    titi = '''titi'''
    toto = '''toto'''
class MixedTypeEnum(Enum):
    '''simple docstring'''
    titi = '''titi'''
    toto = '''toto'''
    fourtytwo = 42
@dataclass
class BasicEnumExample:
    '''simple docstring'''
    foo : BasicEnum = "toto"
    def __post_init__( self):
        """simple docstring"""
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    '''simple docstring'''
    foo : MixedTypeEnum = "toto"
    def __post_init__( self):
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    '''simple docstring'''
    foo : Optional[int] = None
    bar : Optional[float] = field(default=None , metadata={'''help''': '''help message'''} )
    baz : Optional[str] = None
    ces : Optional[List[str]] = list_field(default=[] )
    des : Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample:
    '''simple docstring'''
    foo_int : List[int] = list_field(default=[] )
    bar_int : List[int] = list_field(default=[1, 2, 3] )
    foo_str : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
    foo_float : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample:
    '''simple docstring'''
    required_list : List[int] = field()
    required_str : str = field()
    required_enum : BasicEnum = field()
    def __post_init__( self):
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    '''simple docstring'''
    foo : int
    required_enum : "BasicEnum" = field()
    opt : "Optional[bool]" = None
    baz : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''} )
    foo_str : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class WithDefaultBoolExamplePep604:
    '''simple docstring'''
    foo : bool = False
    baz : bool = True
    opt : bool | None = None
@dataclass
class OptionalExamplePep604:
    '''simple docstring'''
    foo : int | None = None
    bar : float | None = field(default=None , metadata={'''help''': '''help message'''} )
    baz : str | None = None
    ces : list[str] | None = list_field(default=[] )
    des : list[int] | None = list_field(default=[] )
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
lowerCAmelCase = {k: v for k, v in vars(__lowerCAmelCase).items() if k != """container"""}
lowerCAmelCase = {k: v for k, v in vars(__lowerCAmelCase).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , __lowerCAmelCase) and yy.get("""choices""" , __lowerCAmelCase):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](__lowerCAmelCase) , yy["""type"""](__lowerCAmelCase))
del xx["type"], yy["type"]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__lowerCAmelCase , required=__lowerCAmelCase)
expected.add_argument("""--bar""" , type=__lowerCAmelCase , required=__lowerCAmelCase)
expected.add_argument("""--baz""" , type=__lowerCAmelCase , required=__lowerCAmelCase)
expected.add_argument("""--flag""" , type=__lowerCAmelCase , default=__lowerCAmelCase , const=__lowerCAmelCase , nargs="""?""")
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((lowerCAmelCase) , ) = parser.parse_args_into_dataclasses(__lowerCAmelCase , look_for_args_file=__lowerCAmelCase)
self.assertFalse(example.flag)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=__lowerCAmelCase)
expected.add_argument("""--baz""" , default="""toto""" , type=__lowerCAmelCase , help="""help message""")
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__lowerCAmelCase , default=__lowerCAmelCase , const=__lowerCAmelCase , nargs="""?""")
expected.add_argument("""--baz""" , type=__lowerCAmelCase , default=__lowerCAmelCase , const=__lowerCAmelCase , nargs="""?""")
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=__lowerCAmelCase , dest="""baz""")
expected.add_argument("""--opt""" , type=__lowerCAmelCase , default=__lowerCAmelCase)
lowerCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__lowerCAmelCase)
for dataclass_type in dataclass_types:
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = parser.parse_args([])
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase))
lowerCAmelCase = parser.parse_args(["""--foo""", """--no_baz"""])
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase))
lowerCAmelCase = parser.parse_args(["""--foo""", """--baz"""])
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase))
lowerCAmelCase = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""])
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase))
lowerCAmelCase = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""])
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , baz=__lowerCAmelCase , opt=__lowerCAmelCase))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42]) , )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = parser.parse_args([])
self.assertEqual(args.foo , """toto""")
lowerCAmelCase = parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
lowerCAmelCase = parser.parse_args(["""--foo""", """titi"""])
self.assertEqual(args.foo , """titi""")
lowerCAmelCase = parser.parse_args_into_dataclasses(["""--foo""", """titi"""])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
lowerCAmelCase = parser.parse_args(["""--foo""", """42"""])
self.assertEqual(args.foo , 42)
lowerCAmelCase = parser.parse_args_into_dataclasses(["""--foo""", """42"""])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def a_ ( self):
"""simple docstring"""
@dataclass
class a__:
'''simple docstring'''
UpperCAmelCase_ : Literal["titi", "toto", 4_2] = "toto"
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42]) , )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = parser.parse_args([])
self.assertEqual(args.foo , """toto""")
lowerCAmelCase = parser.parse_args(["""--foo""", """titi"""])
self.assertEqual(args.foo , """titi""")
lowerCAmelCase = parser.parse_args(["""--foo""", """42"""])
self.assertEqual(args.foo , 42)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=__lowerCAmelCase)
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=__lowerCAmelCase)
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=__lowerCAmelCase)
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=__lowerCAmelCase)
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = parser.parse_args([])
self.assertEqual(
__lowerCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3]) , )
lowerCAmelCase = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split())
self.assertEqual(__lowerCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7]))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=__lowerCAmelCase , type=__lowerCAmelCase)
expected.add_argument("""--bar""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""help message""")
expected.add_argument("""--baz""" , default=__lowerCAmelCase , type=__lowerCAmelCase)
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=__lowerCAmelCase)
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=__lowerCAmelCase)
lowerCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__lowerCAmelCase)
for dataclass_type in dataclass_types:
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = parser.parse_args([])
self.assertEqual(__lowerCAmelCase , Namespace(foo=__lowerCAmelCase , bar=__lowerCAmelCase , baz=__lowerCAmelCase , ces=[] , des=[]))
lowerCAmelCase = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split())
self.assertEqual(__lowerCAmelCase , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3]))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=__lowerCAmelCase , required=__lowerCAmelCase)
expected.add_argument("""--required_str""" , type=__lowerCAmelCase , required=__lowerCAmelCase)
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""]) , choices=["""titi""", """toto"""] , required=__lowerCAmelCase , )
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__lowerCAmelCase , required=__lowerCAmelCase)
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""]) , choices=["""titi""", """toto"""] , required=__lowerCAmelCase , )
expected.add_argument("""--opt""" , type=__lowerCAmelCase , default=__lowerCAmelCase)
expected.add_argument("""--baz""" , default="""toto""" , type=__lowerCAmelCase , help="""help message""")
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=__lowerCAmelCase)
self.argparsersEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
lowerCAmelCase = parser.parse_dict(__lowerCAmelCase)[0]
lowerCAmelCase = BasicExample(**__lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(__lowerCAmelCase , parser.parse_dict , __lowerCAmelCase , allow_extra_keys=__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = os.path.join(__lowerCAmelCase , """temp_json""")
os.mkdir(__lowerCAmelCase)
with open(temp_local_path + """.json""" , """w+""") as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + """.json"""))[0]
lowerCAmelCase = BasicExample(**__lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
lowerCAmelCase = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = os.path.join(__lowerCAmelCase , """temp_yaml""")
os.mkdir(__lowerCAmelCase)
with open(temp_local_path + """.yaml""" , """w+""") as f:
yaml.dump(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + """.yaml"""))[0]
lowerCAmelCase = BasicExample(**__lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = HfArgumentParser(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
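# Illustrative (a sketch of the pattern these tests exercise):
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "12", "--bar", "3.14", "--baz", "42", "--flag", "True"])
#   example.foo, example.bar, example.baz, example.flag  # -> 12, 3.14, "42", True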
| 272 | '''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__( unittest.TestCase ):
'''simple docstring'''
@property
def a_ ( self):
"""simple docstring"""
torch.manual_seed(0)
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.dummy_uncond_unet
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
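# compare only a 3x3 corner patch of the last channel against the reference
# slice below; pinning every pixel would make the test needlessly brittle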
lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """google/ddpm-cifar10-32"""
lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = PNDMScheduler()
lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase)
pndm.to(__lowerCAmelCase)
pndm.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 272 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
model_type = '''blenderbot-small'''
keys_to_ignore_at_inference = ['''past_key_values''']
attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , __lowerCAmelCase=50265 , __lowerCAmelCase=512 , __lowerCAmelCase=8 , __lowerCAmelCase=2048 , __lowerCAmelCase=16 , __lowerCAmelCase=8 , __lowerCAmelCase=2048 , __lowerCAmelCase=16 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="gelu" , __lowerCAmelCase=512 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1 , __lowerCAmelCase=False , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=2 , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = d_model
lowerCAmelCase = encoder_ffn_dim
lowerCAmelCase = encoder_layers
lowerCAmelCase = encoder_attention_heads
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = encoder_layerdrop
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = encoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , forced_eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
class a__( lowerCAmelCase__ ):
'''simple docstring'''
@property
def a_ ( self):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
])
if self.use_past:
lowerCAmelCase = {0: """batch"""}
lowerCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__lowerCAmelCase , direction="""inputs""")
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
])
if self.use_past:
lowerCAmelCase , lowerCAmelCase = self.num_layers
for i in range(__lowerCAmelCase):
lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
])
return common_inputs
@property
def a_ ( self):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase = super().outputs
else:
lowerCAmelCase = super(__lowerCAmelCase , self).outputs
if self.use_past:
lowerCAmelCase , lowerCAmelCase = self.num_layers
for i in range(__lowerCAmelCase):
lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# Generate decoder inputs
lowerCAmelCase = seq_length if not self.use_past else 1
lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase = dict(**__lowerCAmelCase , **__lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
lowerCAmelCase , lowerCAmelCase = common_inputs["""input_ids"""].shape
lowerCAmelCase = common_inputs["""decoder_input_ids"""].shape[1]
lowerCAmelCase , lowerCAmelCase = self.num_attention_heads
lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase = decoder_seq_length + 3
lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__lowerCAmelCase , __lowerCAmelCase)] , dim=1)
lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase , lowerCAmelCase = self.num_layers
lowerCAmelCase = min(num_encoder_layers , num_decoder_layers)
lowerCAmelCase = max(num_encoder_layers , num_decoder_layers) - min_num_layers
lowerCAmelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(min_num_layers):
common_inputs["past_key_values"].append(
(
torch.zeros(decoder_shape),
torch.zeros(decoder_shape),
torch.zeros(encoder_shape),
torch.zeros(encoder_shape),
))
# TODO: test this.
lowerCAmelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(min_num_layers , max_num_layers):
common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
return common_inputs
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
lowerCAmelCase , lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase = seqlen + 2
lowerCAmelCase , lowerCAmelCase = self.num_layers
lowerCAmelCase , lowerCAmelCase = self.num_attention_heads
lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase = common_inputs["""attention_mask"""].dtype
lowerCAmelCase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype)] , dim=1)
lowerCAmelCase = [
(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
]
return common_inputs
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase = tokenizer.num_special_tokens_to_add(__lowerCAmelCase)
lowerCAmelCase = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add)
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase = [""" """.join([tokenizer.unk_token]) * seq_length] * batch_size
lowerCAmelCase = dict(tokenizer(dummy_input , return_tensors=__lowerCAmelCase))
return common_inputs
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase)
elif self.task == "causal-lm":
lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase)
else:
lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase)
return common_inputs
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase = super()._flatten_past_key_values_(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
else:
lowerCAmelCase = super(__lowerCAmelCase , self)._flatten_past_key_values_(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
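# Illustrative note (not part of the original config class): each dummy
# `past_key_values` entry built above is a tuple of zero tensors shaped
# (batch, num_attention_heads, past_sequence_length, head_dim). A minimal,
# runnable sketch with assumed example sizes:
import torch

batch, num_heads, past_len, head_dim = 2, 16, 7, 64
decoder_shape = (batch, num_heads, past_len, head_dim)
# In the seq2seq case each layer stores four tensors:
# (decoder key, decoder value, encoder key, encoder value).
past_entry = tuple(torch.zeros(decoder_shape) for _ in range(4))
print([tuple(t.shape) for t in past_entry])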
| 272 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def snake_case__ ( _A: str ) -> str:
'''simple docstring'''
if not sentence:
return ""
lowerCAmelCase = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
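# A runnable restatement of the helper above under an assumed, readable name
# (`capitalize_first` is illustrative; the snippet maps the first character
# through a lowercase-to-uppercase table and leaves the rest untouched):
from string import ascii_lowercase, ascii_uppercase

def capitalize_first(sentence: str) -> str:
    """Upper-case only the first character of `sentence`."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]

assert capitalize_first("hello world") == "Hello world"
assert capitalize_first("123 hey") == "123 hey"  # non-letters pass through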
| 272 | 1 |
'''simple docstring'''
import math
__lowercase = 1_0
__lowercase = 7
__lowercase = BALLS_PER_COLOUR * NUM_COLOURS
def snake_case__ ( _A: int = 20 ) -> str:
'''simple docstring'''
lowerCAmelCase = math.comb(NUM_BALLS , _A )
lowerCAmelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _A )
lowerCAmelCase = NUM_COLOURS * (1 - missing_colour / total)
return f"{result:.9f}"
if __name__ == "__main__":
print(solution(2_0))
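# Sanity check (illustrative, standard library only): drawing 20 of 70 balls
# (7 colours, 10 balls each), the expected number of distinct colours is
# NUM_COLOURS * (1 - C(60, 20) / C(70, 20)).
import math

total = math.comb(70, 20)
missing_one_colour = math.comb(60, 20)
expected = 7 * (1 - missing_one_colour / total)
assert f"{expected:.9f}" == "6.818741802"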
| 272 | '''simple docstring'''
import os
import string
import sys
__lowercase = 1 << 8
__lowercase = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
__lowercase = []
__lowercase = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
__lowercase = ord(str(i))
def snake_case__ ( ) -> List[Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(WIN_CH_BUFFER ) == 0:
# Read the keystroke
lowerCAmelCase = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(chx )
if ord(chx ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase = cha[1]
else:
lowerCAmelCase = ch.decode(encoding )
else:
lowerCAmelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase = sys.stdin.fileno()
lowerCAmelCase = termios.tcgetattr(fd )
try:
tty.setraw(fd )
lowerCAmelCase = sys.stdin.read(1 )
finally:
termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
return ch
def snake_case__ ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase = get_raw_chars()
if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(char ) == KEYMAP["esc"]:
lowerCAmelCase = get_raw_chars()
if ord(combo ) == KEYMAP["mod_int"]:
lowerCAmelCase = get_raw_chars()
if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(key ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
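# Hypothetical usage sketch (assumes the helpers above keep their original
# names `get_raw_chars`/`get_character`, as the call structure suggests):
# echo single keypresses until the user presses Ctrl+C.
if __name__ == "__main__":
    while True:
        key = get_character()
        if key == chr(KEYMAP["interrupt"]):
            break
        print(repr(key))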
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( _A: list[int] , _A: int ) -> list[int]:
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = len(_A ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowerCAmelCase = i + 1
else:
lowerCAmelCase = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 1_1, 1_5], 9) = }')
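# Note (illustrative): the two-pointer scan above assumes `nums` is sorted in
# ascending order. For unsorted input, a single hash-map pass is the usual
# alternative; a minimal sketch:
def two_sum_unsorted(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for idx, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], idx]
        seen[value] = idx
    return []

assert two_sum_unsorted([11, 2, 15, 7], 9) == [1, 3]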
| 272 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ['''input_features''']
def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16000 , __lowerCAmelCase=160 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = n_fft
lowerCAmelCase = hop_length
lowerCAmelCase = chunk_length
lowerCAmelCase = chunk_length * sampling_rate
lowerCAmelCase = self.n_samples // hop_length
lowerCAmelCase = sampling_rate
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , )
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0)
lowerCAmelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0):
"""simple docstring"""
if attention_mask is not None:
lowerCAmelCase = np.array(__lowerCAmelCase , np.intaa)
lowerCAmelCase = []
for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1)):
lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
if length < normed_slice.shape[0]:
lowerCAmelCase = padding_value
normed_input_values.append(normed_slice)
else:
lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
return normed_input_values
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
lowerCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray):
lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa)
elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
lowerCAmelCase = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech]).T]
lowerCAmelCase = BatchFeature({"""input_features""": raw_speech})
# convert into correct format for padding
lowerCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0)
# make sure list is in array format
lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
lowerCAmelCase = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
if isinstance(input_features[0] , List):
lowerCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase)
return padded_inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__)
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
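# Illustrative, runnable sketch of the log-mel post-processing used in
# `_np_extract_fbank_features` above. The power mel spectrogram itself is
# faked with random data, and the clip to 1e-10 before log10 is an assumption
# about what the `spectrogram(..., log_mel="log10")` helper does internally;
# only the clamp and rescale steps are taken directly from the snippet.
import numpy as np

mel_power = np.abs(np.random.randn(80, 3000)).astype(np.float32)  # fake power mel
log_spec = np.log10(np.clip(mel_power, a_min=1e-10, a_max=None))
log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # clamp dynamic range to 80 dB
log_spec = (log_spec + 4.0) / 4.0                      # rescale roughly into [-1, 1]
print(log_spec.shape, float(log_spec.min()), float(log_spec.max()))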
| 272 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=30 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=32 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=10 , __lowerCAmelCase=0.02 , __lowerCAmelCase=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = num_patches + 1
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def a_ ( self):
"""simple docstring"""
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = ViTMSNModel(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.type_sequence_label_size
lowerCAmelCase = ViTMSNForImageClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase)
print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
print(f"Labels: {labels}")
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = ViTMSNForImageClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCAmelCase_ : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : Optional[int] = False
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ViTMSNModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""")
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__lowerCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__lowerCAmelCase)
lowerCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def a_ ( self):
"""simple docstring"""
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ViTMSNModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def snake_case__ ( ) -> int:
'''simple docstring'''
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a_ ( self):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""") if is_vision_available() else None
@slow
def a_ ( self):
"""simple docstring"""
torch.manual_seed(2)
lowerCAmelCase = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""").to(__lowerCAmelCase)
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""").to(__lowerCAmelCase)
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**__lowerCAmelCase)
# verify the logits
lowerCAmelCase = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , __lowerCAmelCase)
lowerCAmelCase = torch.tensor([-0.0803, -0.4454, -0.2375]).to(__lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4))
| 272 | '''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowercase = logging.get_logger(__name__)
__lowercase = '''T5Config'''
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''mt5'''
UpperCAmelCase_ : Tuple = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = '''mt5'''
UpperCAmelCase_ : int = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''mt5'''
UpperCAmelCase_ : Union[str, Any] = MTaConfig
| 272 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
__lowercase = '''src/transformers'''
# Matches is_xxx_available()
__lowercase = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__lowercase = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__lowercase = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__lowercase = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__lowercase = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__lowercase = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__lowercase = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__lowercase = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__lowercase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__lowercase = re.compile(R'''^\s*try:''')
# Catches a line with else:
__lowercase = re.compile(R'''^\s*else:''')
def snake_case__ ( _A: Optional[Any] ) -> Dict:
'''simple docstring'''
if _re_test_backend.search(_A ) is None:
return None
lowerCAmelCase = [b[0] for b in _re_backend.findall(_A )]
backends.sort()
return "_and_".join(backends )
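# Example (illustrative): for a guard line such as
#     "if not is_torch_available() and not is_vision_available():"
# the backend regex extracts ["torch", "vision"], which sorts and joins to the
# key "torch_and_vision". A self-contained check:
import re

_demo_re = re.compile(r"is\_([a-z_]*)_available()")
guard = "if not is_torch_available() and not is_vision_available():"
found = sorted(b[0] for b in _demo_re.findall(guard))
assert "_and_".join(found) == "torch_and_vision"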
def snake_case__ ( _A: Tuple ) -> Dict:
'''simple docstring'''
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCAmelCase = f.readlines()
lowerCAmelCase = 0
while line_index < len(lines ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lines ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(line ):
lowerCAmelCase = _re_one_line_import_struct.search(line ).groups()[0]
lowerCAmelCase = re.findall(r"""\[([^\]]+)\]""" , content )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
lowerCAmelCase = _re_import_struct_key_value.search(line )
if single_line_import_search is not None:
lowerCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(obj ) > 0]
objects.extend(imports )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
lowerCAmelCase = lines[line_index]
if _re_import_struct_add_one.search(line ) is not None:
objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
elif _re_import_struct_add_many.search(line ) is not None:
lowerCAmelCase = _re_import_struct_add_many.search(line ).groups()[0].split(""", """ )
lowerCAmelCase = [obj[1:-1] for obj in imports if len(obj ) > 0]
objects.extend(imports )
elif _re_between_brackets.search(line ) is not None:
lowerCAmelCase = _re_between_brackets.search(line ).groups()[0].split(""", """ )
lowerCAmelCase = [obj[1:-1] for obj in imports if len(obj ) > 0]
objects.extend(imports )
elif _re_quote_object.search(line ) is not None:
objects.append(_re_quote_object.search(line ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase = []
while (
line_index < len(_A )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
lowerCAmelCase = lines[line_index]
lowerCAmelCase = _re_import.search(line )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(_A ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
lowerCAmelCase = lines[line_index]
lowerCAmelCase = _re_import.search(line )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def snake_case__ ( _A: Any , _A: Optional[Any] ) -> List[Any]:
'''simple docstring'''
def find_duplicates(_A: Tuple ):
return [k for k, v in collections.Counter(_A ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase = []
for key in import_dict_objects.keys():
lowerCAmelCase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
lowerCAmelCase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase = """base imports""" if key == """none""" else f"{key} backend"
errors.append(f"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT." )
return errors
def snake_case__ ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
lowerCAmelCase = os.path.join(root , """__init__.py""" )
lowerCAmelCase = parse_init(fname )
if objects is not None:
lowerCAmelCase = analyze_results(*objects )
if len(errors ) > 0:
lowerCAmelCase = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("""\n""".join(errors ) )
if len(failures ) > 0:
raise ValueError("""\n\n""".join(failures ) )
def snake_case__ ( ) -> int:
'''simple docstring'''
lowerCAmelCase = []
for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(folder )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(path ) / folder).glob("""*.py""" ) ) ) == 0:
continue
lowerCAmelCase = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
lowerCAmelCase = short_path.replace(os.path.sep , """.""" )
submodules.append(submodule )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
lowerCAmelCase = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(submodule )
return submodules
__lowercase = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def snake_case__ ( ) -> Any:
'''simple docstring'''
from transformers.utils import direct_transformers_import
lowerCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS )
lowerCAmelCase = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentially re-) add them.
with open(os.path.join(PATH_TO_TRANSFORMERS , """__init__.py""" ) , """r""" ) as f:
lowerCAmelCase = f.read()
import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""" , init_content ) ) )
lowerCAmelCase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(module_not_registered ) > 0:
lowerCAmelCase = """\n""".join(f"- {module}" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
f"{list_of_modules}\n"
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 272 | '''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''ernie_m'''
UpperCAmelCase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowerCAmelCase = 250002 , __lowerCAmelCase = 768 , __lowerCAmelCase = 12 , __lowerCAmelCase = 12 , __lowerCAmelCase = 3072 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 514 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-0_5 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = classifier_dropout
lowerCAmelCase = is_decoder
lowerCAmelCase = act_dropout
| 272 | 1 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowercase = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
requires_backends(self , """decord""")
self.check_model_type(__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None):
"""simple docstring"""
lowerCAmelCase = {}
if frame_sampling_rate is not None:
lowerCAmelCase = frame_sampling_rate
if num_frames is not None:
lowerCAmelCase = num_frames
lowerCAmelCase = {}
if top_k is not None:
lowerCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=1):
"""simple docstring"""
if num_frames is None:
lowerCAmelCase = self.model.config.num_frames
if video.startswith("""http://""") or video.startswith("""https://"""):
lowerCAmelCase = BytesIO(requests.get(__lowerCAmelCase).content)
lowerCAmelCase = VideoReader(__lowerCAmelCase)
videoreader.seek(0)
lowerCAmelCase = 0
lowerCAmelCase = num_frames * frame_sampling_rate - 1
lowerCAmelCase = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.intaa)
lowerCAmelCase = videoreader.get_batch(indices).asnumpy()
lowerCAmelCase = list(video)
lowerCAmelCase = self.image_processor(video , return_tensors=self.framework)
return model_inputs
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.model(**__lowerCAmelCase)
return model_outputs
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowerCAmelCase = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase = model_outputs.logits.softmax(-1)[0]
lowerCAmelCase , lowerCAmelCase = probs.topk(top_k)
else:
raise ValueError(f"Unsupported framework: {self.framework}")
lowerCAmelCase = scores.tolist()
lowerCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(scores , ids)]
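# Illustrative usage sketch (the URL is a placeholder, not from the snippet;
# running this needs `decord`, `torch`, network access, and a
# video-classification checkpoint):
from transformers import pipeline

classifier = pipeline("video-classification")  # default checkpoint assumed
preds = classifier("https://example.com/clip.mp4", top_k=3)
print(preds)  # [{"score": ..., "label": ...}, ...] as assembled in `postprocess`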
| 272 | '''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowercase = logging.getLogger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Any = '''sequence-classification'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
if type(__lowerCAmelCase) == dict:
lowerCAmelCase = Namespace(**__lowerCAmelCase)
lowerCAmelCase = glue_output_modes[hparams.task]
lowerCAmelCase = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return self.model(**__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase = outputs[0]
lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""]
lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.hparams
lowerCAmelCase = processors[args.task]()
lowerCAmelCase = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase = self._feature_file(mode)
if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , cached_features_file)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowerCAmelCase = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowerCAmelCase = convert_examples_to_features(
__lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __lowerCAmelCase)
torch.save(features , cached_features_file)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False):
"""simple docstring"""
lowerCAmelCase = """dev""" if mode == """test""" else mode
lowerCAmelCase = self._feature_file(mode)
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
lowerCAmelCase = torch.load(cached_features_file)
lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = outputs[:2]
lowerCAmelCase = logits.detach().cpu().numpy()
lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = np.argmax(preds , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = np.squeeze(preds)
lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)}
lowerCAmelCase = dict(results.items())
lowerCAmelCase = results
return ret, preds_list, out_label_list
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(outputs)
lowerCAmelCase = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(outputs)
lowerCAmelCase = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def a_ ( __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase)
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def snake_case__ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser()
add_generic_args(parser , os.getcwd() )
lowerCAmelCase = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
lowerCAmelCase = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCAmelCase = os.path.join(
"""./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
lowerCAmelCase = GLUETransformer(args )
lowerCAmelCase = generic_train(model , args )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(model )
if __name__ == "__main__":
main()
| 272 | 1 |
'''simple docstring'''
from math import sqrt
def snake_case__ ( _A: int = 1000000 ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
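# Sanity check (illustrative): the integrality test above uses the shortest
# surface path over a cuboid with sides a <= b <= c, which unfolds to
# sqrt((a + b)**2 + c**2). The classic 6 x 5 x 3 cuboid has path length 10.
from math import sqrt

a, b, c = 3, 5, 6
assert sqrt((a + b) ** 2 + c**2) == 10.0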
| 272 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
| 272 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean
lowerCAmelCase = image_std
def a_ ( self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = DPTImageProcessingTester(self)
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean"""))
self.assertTrue(hasattr(__lowerCAmelCase , """image_std"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """size"""))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 1 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = 2_5_6
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['''melgan''']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
# From MELGAN
lowerCAmelCase = math.log(1E-5) # Matches MelGAN training.
lowerCAmelCase = 4.0 # Largest value for most examples
lowerCAmelCase = 128
self.register_modules(
notes_encoder=__lowerCAmelCase , continuous_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase , scheduler=__lowerCAmelCase , melgan=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=(-1.0, 1.0) , __lowerCAmelCase=False):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = output_range
if clip:
lowerCAmelCase = torch.clip(__lowerCAmelCase , self.min_value , self.max_value)
# Scale to [0, 1].
lowerCAmelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=(-1.0, 1.0) , __lowerCAmelCase=False):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = input_range
lowerCAmelCase = torch.clip(outputs , min_out , max_out) if clip else outputs
# Scale to [0, 1].
lowerCAmelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = input_tokens > 0
lowerCAmelCase , lowerCAmelCase = self.notes_encoder(
encoder_input_tokens=__lowerCAmelCase , encoder_inputs_mask=__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = self.continuous_encoder(
encoder_inputs=__lowerCAmelCase , encoder_inputs_mask=__lowerCAmelCase)
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = noise_time
if not torch.is_tensor(__lowerCAmelCase):
lowerCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
elif torch.is_tensor(__lowerCAmelCase) and len(timesteps.shape) == 0:
lowerCAmelCase = timesteps[None].to(input_tokens.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCAmelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
lowerCAmelCase = self.decoder(
encodings_and_masks=__lowerCAmelCase , decoder_input_tokens=__lowerCAmelCase , decoder_noise_time=__lowerCAmelCase)
return logits
@torch.no_grad()
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 100 , __lowerCAmelCase = True , __lowerCAmelCase = "numpy" , __lowerCAmelCase = None , __lowerCAmelCase = 1 , ):
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__lowerCAmelCase)}.")
lowerCAmelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
lowerCAmelCase = np.zeros([1, 0, self.n_dims] , np.floataa)
lowerCAmelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device)
for i, encoder_input_tokens in enumerate(input_tokens):
if i == 0:
lowerCAmelCase = torch.from_numpy(pred_mel[:1].copy()).to(
device=self.device , dtype=self.decoder.dtype)
# The first chunk has no previous context.
lowerCAmelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowerCAmelCase = ones
lowerCAmelCase = self.scale_features(
__lowerCAmelCase , output_range=[-1.0, 1.0] , clip=__lowerCAmelCase)
lowerCAmelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=__lowerCAmelCase , continuous_mask=__lowerCAmelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowerCAmelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__lowerCAmelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__lowerCAmelCase)
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
lowerCAmelCase = self.decode(
encodings_and_masks=__lowerCAmelCase , input_tokens=__lowerCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase).prev_sample
lowerCAmelCase = self.scale_to_features(__lowerCAmelCase , input_range=[-1.0, 1.0])
lowerCAmelCase = mel[:1]
lowerCAmelCase = mel.cpu().float().numpy()
lowerCAmelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase , __lowerCAmelCase)
logger.info("""Generated segment""" , __lowerCAmelCase)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""")
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""")
if output_type == "numpy":
lowerCAmelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa))
else:
lowerCAmelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__lowerCAmelCase)
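# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the pipeline): the
# `scale_features` / `scale_to_features` pair above is a plain min-max rescale
# and its inverse. The round trip below uses assumed spectrogram bounds of
# -5.0 / 5.0 purely to show the arithmetic.
if __name__ == "__main__":
    import torch

    min_value, max_value = -5.0, 5.0  # assumed bounds for the example
    features = torch.tensor([-5.0, 0.0, 2.5, 5.0])
    min_out, max_out = -1.0, 1.0

    zero_one = (features - min_value) / (max_value - min_value)
    scaled = zero_one * (max_out - min_out) + min_out  # now in [-1, 1]

    zero_one = (scaled - min_out) / (max_out - min_out)
    restored = zero_one * (max_value - min_value) + min_value
    assert torch.allclose(restored, features)
# ---------------------------------------------------------------------------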
| 272 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (3, 32, 128)
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
lowerCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1))
return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 272 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
    import torch
    from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        # gelu_10 caps activations at 10, so the two agree wherever the output is below the cap
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        # each lookup must return a fresh object, so attributes set on one do not leak to another
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 272 | '''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = XLMRobertaTokenizer
UpperCAmelCase_ : int = XLMRobertaTokenizerFast
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Optional[int] = True
def a_ ( self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """<pad>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(vocab_keys[-1] , """<mask>""")
self.assertEqual(len(__lowerCAmelCase) , 1002)
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
@cached_property
def a_ ( self):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""")
def a_ ( self):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCAmelCase , f.name)
lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase)
lowerCAmelCase = pickle.dumps(__lowerCAmelCase)
pickle.loads(__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(__lowerCAmelCase)
lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCAmelCase = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 272 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowercase = 2_5_6_0_4_7
__lowercase = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = NllbTokenizer
UpperCAmelCase_ : Optional[int] = NllbTokenizerFast
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : str = {}
def a_ ( self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase = NllbTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = NllbTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowerCAmelCase = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=True
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase)
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
# Save tokenizer rust, legacy_format=False
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase))
shutil.rmtree(__lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
if not self.test_seqaseq:
return
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Longer text that will definitely require truncation.
lowerCAmelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCAmelCase , tgt_texts=__lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.labels.shape[1] , 10)
# max_target_length will default to max_length if not specified
lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
__lowerCAmelCase , tgt_texts=__lowerCAmelCase , max_length=3 , return_tensors="""pt""")
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.labels.shape[1] , 3)
lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""")
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3)
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3)
self.assertNotIn("""decoder_input_ids""" , __lowerCAmelCase)
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""")
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = [AddedToken("""<special>""" , lstrip=__lowerCAmelCase)]
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tokenizer_r.encode("""Hey this is a <special> token""")
lowerCAmelCase = tokenizer_r.encode("""<special>""" , add_special_tokens=__lowerCAmelCase)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = self.tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode("""Hey this is a <special> token""")
lowerCAmelCase = tokenizer_cr.encode("""Hey this is a <special> token""")
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = '''facebook/nllb-200-distilled-600M'''
UpperCAmelCase_ : int = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
UpperCAmelCase_ : Optional[int] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
UpperCAmelCase_ : List[Any] = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def a_ ( cls):
"""simple docstring"""
lowerCAmelCase = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""")
lowerCAmelCase = 1
return cls
def a_ ( self):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 256001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 256002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 256057)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids)
# fmt: off
lowerCAmelCase = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
lowerCAmelCase = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase)
lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase)
self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __lowerCAmelCase)
lowerCAmelCase = 10
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase).input_ids[0]
self.assertEqual(ids[-1] , 2)
self.assertEqual(ids[0] , __lowerCAmelCase)
self.assertEqual(len(__lowerCAmelCase) , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""]) , [256203, 3])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCAmelCase)
lowerCAmelCase = NllbTokenizer.from_pretrained(__lowerCAmelCase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens) , return_tensors="""pt""" , )
lowerCAmelCase = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""])
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 15) , batch.input_ids.shape)
self.assertEqual((2, 15) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase)
self.assertEqual(__lowerCAmelCase , batch.decoder_input_ids[0, 0]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors="""pt""")
lowerCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=10 , return_tensors="""pt""")
lowerCAmelCase = targets["""input_ids"""]
lowerCAmelCase = shift_tokens_right(
__lowerCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""")
self.assertEqual(
nested_simplify(__lowerCAmelCase) , {
# A, test, EOS, en_XX
"""input_ids""": [[256047, 70, 7356, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 256057,
} , )
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""")
self.assertEqual(
inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047])
lowerCAmelCase = False
lowerCAmelCase = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""")
self.assertEqual(
inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2])
| 272 | '''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    '''Euclidean algorithm for gcd(a, b).'''
    while a != 0:
        a, b = b % a, a
    return b
def mod_inverse(a: int, m: int) -> int:
    '''Return the modular multiplicative inverse of a modulo m.'''
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclidean algorithm: carry the Bezout coefficients along with the remainders.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
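if __name__ == "__main__":
    # Hedged usage sketch (added for illustration): 4 is the inverse of 3 mod 11,
    # and Python's built-in pow(a, -1, m) (3.8+) gives an independent cross-check.
    inv = mod_inverse(3, 11)
    assert inv == 4 and (3 * inv) % 11 == 1
    assert inv == pow(3, -1, 11)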
| 272 | 1 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    '''Extract the start/end timestamps and duration (in minutes) of a single job.'''
    job_info = {}
    start = job["""started_at"""]
    end = job["""completed_at"""]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["""started_at"""] = start
    job_info["""completed_at"""] = end
    job_info["""duration"""] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    '''Collect the timing of every job in a GitHub Actions workflow run.'''
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["""name"""]: extract_time_from_single_job(job) for job in result["""jobs"""]})
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["""name"""]: extract_time_from_single_job(job) for job in result["""jobs"""]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
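# Hedged sketch (illustration only, no network access needed): how a single
# job's duration is derived from GitHub's ISO-8601 timestamps.
#
#     >>> import dateutil.parser as date_parser
#     >>> start = date_parser.parse("2023-01-01T10:00:00Z")
#     >>> end = date_parser.parse("2023-01-01T10:41:00Z")
#     >>> round((end - start).total_seconds() / 60.0)
#     41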
| 272 | '''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    '''Map 1-d timesteps to sin/cos positional features of width `embedding_dim`.'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
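if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original
    # module): one even-width sin/cos feature vector per diffusion timestep.
    example_timesteps = jnp.array([0.0, 10.0, 500.0])
    example_emb = get_sinusoidal_embeddings(example_timesteps, embedding_dim=32)
    assert example_emb.shape == (3, 32)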
| 272 | 1 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
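# Hedged sketch (illustration only, not the transformers implementation): the
# lazy-module pattern above defers importing heavy submodules until a symbol
# is first accessed. A minimal stand-alone equivalent could look like:
#
#     import importlib
#
#     class LazyModule:
#         def __init__(self, package, import_structure):
#             self._package = package
#             # invert {submodule: [symbols]} into {symbol: submodule}
#             self._symbol_to_module = {
#                 sym: mod for mod, syms in import_structure.items() for sym in syms
#             }
#
#         def __getattr__(self, symbol):
#             module = importlib.import_module(
#                 "." + self._symbol_to_module[symbol], self._package
#             )
#             return getattr(module, symbol)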
| 272 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['''image_processor''']
UpperCAmelCase_ : Tuple = '''SamImageProcessor'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase)
lowerCAmelCase = self.image_processor
lowerCAmelCase = -10
lowerCAmelCase = self.image_processor.size["""longest_edge"""]
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = self.image_processor(
__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
# pop arguments that are not used in the forward pass but are used nevertheless
lowerCAmelCase = encoding_image_processor["""original_sizes"""]
if hasattr(__lowerCAmelCase , """numpy"""): # Checks if Torch or TF tensor
lowerCAmelCase = original_sizes.numpy()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._check_and_preprocess_points(
input_points=__lowerCAmelCase , input_labels=__lowerCAmelCase , input_boxes=__lowerCAmelCase , )
lowerCAmelCase = self._normalize_and_convert(
__lowerCAmelCase , __lowerCAmelCase , input_points=__lowerCAmelCase , input_labels=__lowerCAmelCase , input_boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , )
return encoding_image_processor
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="pt" , ):
"""simple docstring"""
if input_points is not None:
if len(__lowerCAmelCase) != len(__lowerCAmelCase):
lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __lowerCAmelCase , original_sizes[0]) for point in input_points
]
else:
lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __lowerCAmelCase , __lowerCAmelCase)
for point, original_size in zip(__lowerCAmelCase , __lowerCAmelCase)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
lowerCAmelCase , lowerCAmelCase = self._pad_points_and_labels(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = np.array(__lowerCAmelCase)
if input_labels is not None:
lowerCAmelCase = np.array(__lowerCAmelCase)
if input_boxes is not None:
if len(__lowerCAmelCase) != len(__lowerCAmelCase):
lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __lowerCAmelCase , original_sizes[0] , is_bounding_box=__lowerCAmelCase)
for box in input_boxes
]
else:
lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __lowerCAmelCase , __lowerCAmelCase , is_bounding_box=__lowerCAmelCase)
for box, original_size in zip(__lowerCAmelCase , __lowerCAmelCase)
]
lowerCAmelCase = np.array(__lowerCAmelCase)
if input_boxes is not None:
if return_tensors == "pt":
lowerCAmelCase = torch.from_numpy(__lowerCAmelCase)
# boxes batch size of 1 by default
lowerCAmelCase = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
lowerCAmelCase = tf.convert_to_tensor(__lowerCAmelCase)
# boxes batch size of 1 by default
lowerCAmelCase = tf.expand_dims(__lowerCAmelCase , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes})
if input_points is not None:
if return_tensors == "pt":
lowerCAmelCase = torch.from_numpy(__lowerCAmelCase)
# point batch size of 1 by default
lowerCAmelCase = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
lowerCAmelCase = tf.convert_to_tensor(__lowerCAmelCase)
# point batch size of 1 by default
lowerCAmelCase = tf.expand_dims(__lowerCAmelCase , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points})
if input_labels is not None:
if return_tensors == "pt":
lowerCAmelCase = torch.from_numpy(__lowerCAmelCase)
# point batch size of 1 by default
lowerCAmelCase = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
lowerCAmelCase = tf.convert_to_tensor(__lowerCAmelCase)
# point batch size of 1 by default
lowerCAmelCase = tf.expand_dims(__lowerCAmelCase , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels})
return encoding_image_processor
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = max([point.shape[0] for point in input_points])
lowerCAmelCase = []
for i, point in enumerate(__lowerCAmelCase):
if point.shape[0] != expected_nb_points:
lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
lowerCAmelCase = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(__lowerCAmelCase)
lowerCAmelCase = processed_input_points
return input_points, input_labels
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = original_size
lowerCAmelCase , lowerCAmelCase = self.image_processor._get_preprocess_shape(__lowerCAmelCase , longest_edge=__lowerCAmelCase)
lowerCAmelCase = deepcopy(__lowerCAmelCase).astype(__lowerCAmelCase)
if is_bounding_box:
lowerCAmelCase = coords.reshape(-1 , 2 , 2)
lowerCAmelCase = coords[..., 0] * (new_w / old_w)
lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCAmelCase = coords.reshape(-1 , 4)
return coords
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ):
"""simple docstring"""
if input_points is not None:
if hasattr(__lowerCAmelCase , """numpy"""): # Checks for TF or Torch tensor
lowerCAmelCase = input_points.numpy().tolist()
if not isinstance(__lowerCAmelCase , __lowerCAmelCase) or not isinstance(input_points[0] , __lowerCAmelCase):
raise ValueError("""Input points must be a list of list of floating points.""")
lowerCAmelCase = [np.array(__lowerCAmelCase) for input_point in input_points]
else:
lowerCAmelCase = None
if input_labels is not None:
if hasattr(__lowerCAmelCase , """numpy"""):
lowerCAmelCase = input_labels.numpy().tolist()
if not isinstance(__lowerCAmelCase , __lowerCAmelCase) or not isinstance(input_labels[0] , __lowerCAmelCase):
raise ValueError("""Input labels must be a list of list integers.""")
lowerCAmelCase = [np.array(__lowerCAmelCase) for label in input_labels]
else:
lowerCAmelCase = None
if input_boxes is not None:
if hasattr(__lowerCAmelCase , """numpy"""):
lowerCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(__lowerCAmelCase , __lowerCAmelCase)
or not isinstance(input_boxes[0] , __lowerCAmelCase)
or not isinstance(input_boxes[0][0] , __lowerCAmelCase)
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""")
lowerCAmelCase = [np.array(__lowerCAmelCase).astype(np.floataa) for box in input_boxes]
else:
lowerCAmelCase = None
return input_points, input_labels, input_boxes
@property
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__lowerCAmelCase))
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.image_processor.post_process_masks(*__lowerCAmelCase , **__lowerCAmelCase)
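# Hedged sketch (illustration only): the coordinate rescaling performed by
# `_normalize_coordinates` above is plain proportional resizing. With an
# assumed original 600 x 900 (h, w) image resized so its longest edge is 1024:
#
#     >>> old_h, old_w = 600, 900
#     >>> new_w = 1024; new_h = round(old_h * new_w / old_w)   # 683
#     >>> x, y = 450, 300                 # a point in original coordinates
#     >>> (x * new_w / old_w, y * new_h / old_h)
#     (512.0, 341.5)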
| 272 | '''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    '''
    Return the least M such that more than `limit` cuboids with integer
    dimensions up to M x M x M have an integer shortest surface path
    (Project Euler problem 86).
    '''
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # count the (a, b) pairs with a + b == sum_shortest_sides and a <= b <= max_cuboid_size
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
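# Hedged worked example (added for illustration): for the classic 6 x 5 x 3
# cuboid, unfolding one face turns the shortest surface route into a straight
# line of length sqrt((5 + 3)**2 + 6**2) = sqrt(100) = 10, exactly the kind
# of integer path that `solution` counts.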
| 272 | 1 |
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    '''Recursively fill `data` and print every combination of size r.'''
    if index == r:
        for j in range(r):
            print(data[j], end=""" """)
        print(""" """)
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
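# Hedged usage note (added for illustration): with arr = [10, 20, 30, 40, 50]
# and r = 3 the driver prints all C(5, 3) = 10 combinations in lexicographic
# order, beginning:
#     10 20 30
#     10 20 40
#     10 20 50
#     10 30 40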
| 272 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 272 | 1 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
super().tearDown()
gc.collect()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa)
lowerCAmelCase , lowerCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=__lowerCAmelCase , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa)
lowerCAmelCase = controlnet_params
lowerCAmelCase = """bird"""
lowerCAmelCase = jax.device_count()
lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples)
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""")
lowerCAmelCase = pipe.prepare_image_inputs([canny_image] * num_samples)
lowerCAmelCase = jax.random.PRNGKey(0)
lowerCAmelCase = jax.random.split(__lowerCAmelCase , jax.device_count())
lowerCAmelCase = replicate(__lowerCAmelCase)
lowerCAmelCase = shard(__lowerCAmelCase)
lowerCAmelCase = shard(__lowerCAmelCase)
lowerCAmelCase = pipe(
prompt_ids=__lowerCAmelCase , image=__lowerCAmelCase , params=__lowerCAmelCase , prng_seed=__lowerCAmelCase , num_inference_steps=50 , jit=__lowerCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
lowerCAmelCase = images[0, 253:256, 253:256, -1]
lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten()))
lowerCAmelCase = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1E-2
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa)
lowerCAmelCase , lowerCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=__lowerCAmelCase , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa)
lowerCAmelCase = controlnet_params
lowerCAmelCase = """Chef in the kitchen"""
lowerCAmelCase = jax.device_count()
lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples)
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""")
lowerCAmelCase = pipe.prepare_image_inputs([pose_image] * num_samples)
lowerCAmelCase = jax.random.PRNGKey(0)
lowerCAmelCase = jax.random.split(__lowerCAmelCase , jax.device_count())
lowerCAmelCase = replicate(__lowerCAmelCase)
lowerCAmelCase = shard(__lowerCAmelCase)
lowerCAmelCase = shard(__lowerCAmelCase)
lowerCAmelCase = pipe(
prompt_ids=__lowerCAmelCase , image=__lowerCAmelCase , params=__lowerCAmelCase , prng_seed=__lowerCAmelCase , num_inference_steps=50 , jit=__lowerCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
lowerCAmelCase = images[0, 253:256, 253:256, -1]
lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten()))
lowerCAmelCase = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1E-2
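# A quick sketch of the device layout used by both tests above: replicate()
# copies the params pytree to every local device, while shard() splits the
# leading batch axis into (num_devices, per_device_batch, ...):
import numpy as np

demo_batch = np.zeros((jax.local_device_count() * 2, 4), dtype=np.float32)
print(shard(demo_batch).shape)  # (num_devices, 2, 4): one slice per device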
| 272 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean
lowerCAmelCase = image_std
def a_ ( self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = DPTImageProcessingTester(self)
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__lowerCAmelCase , """image_mean"""))
self.assertTrue(hasattr(__lowerCAmelCase , """image_std"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(__lowerCAmelCase , """size"""))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image)
# Test non-batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray)
# Test non-batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor)
# Test non-batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 272 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = [label.strip() for label in labels.split(""",""") if label.strip()]
return labels
def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
if len(__lowerCAmelCase) == 0 or len(__lowerCAmelCase) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""")
if hypothesis_template.format(labels[0]) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(__lowerCAmelCase))
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = [sequences]
lowerCAmelCase = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__lowerCAmelCase)] for label in labels])
return sequence_pairs, sequences
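# Sketch of the (sequence, hypothesis) pairing built above, using hypothetical
# labels and the default template:
demo_labels = ["politics", "sports"]
demo_template = "This example is {}."
demo_pairs = [["Who won the match?", demo_template.format(lb)] for lb in demo_labels]
# -> [['Who won the match?', 'This example is politics.'],
#     ['Who won the match?', 'This example is sports.']]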
@add_end_docstrings(lowerCAmelCase__ )
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase=ZeroShotClassificationArgumentHandler() , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = args_parser
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""")
@property
def a_ ( self):
"""simple docstring"""
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail"""):
return ind
return -1
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=TruncationStrategy.ONLY_FIRST , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""")
lowerCAmelCase = self.tokenizer.eos_token
try:
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , )
except Exception as e:
if "too short" in str(__lowerCAmelCase):
# the tokenizer might complain that we want to truncate
# to a length that the input never even reaches.
# In that case we don't want to truncate, and there
# seems to be no better way to catch that exception.
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
if kwargs.get("""multi_class""" , __lowerCAmelCase) is not None:
lowerCAmelCase = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""")
lowerCAmelCase = {}
if "candidate_labels" in kwargs:
lowerCAmelCase = self._args_parser._parse_labels(kwargs["""candidate_labels"""])
if "hypothesis_template" in kwargs:
lowerCAmelCase = kwargs["""hypothesis_template"""]
lowerCAmelCase = {}
if "multi_label" in kwargs:
lowerCAmelCase = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase , ):
"""simple docstring"""
if len(__lowerCAmelCase) == 0:
pass
elif len(__lowerCAmelCase) == 1 and "candidate_labels" not in kwargs:
lowerCAmelCase = args[0]
else:
raise ValueError(f"Unable to understand extra arguments {args}")
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase="This example is {}."):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self._args_parser(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
for i, (candidate_label, sequence_pair) in enumerate(zip(__lowerCAmelCase , __lowerCAmelCase)):
lowerCAmelCase = self._parse_and_tokenize([sequence_pair])
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__lowerCAmelCase) - 1,
**model_input,
}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = inputs["""candidate_label"""]
lowerCAmelCase = inputs["""sequence"""]
lowerCAmelCase = {k: inputs[k] for k in self.tokenizer.model_input_names}
lowerCAmelCase = self.model(**__lowerCAmelCase)
lowerCAmelCase = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=False):
"""simple docstring"""
lowerCAmelCase = [outputs["""candidate_label"""] for outputs in model_outputs]
lowerCAmelCase = [outputs["""sequence"""] for outputs in model_outputs]
lowerCAmelCase = np.concatenate([output["""logits"""].numpy() for output in model_outputs])
lowerCAmelCase = logits.shape[0]
lowerCAmelCase = len(__lowerCAmelCase)
lowerCAmelCase = N // n
lowerCAmelCase = logits.reshape((num_sequences, n, -1))
if multi_label or len(__lowerCAmelCase) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
lowerCAmelCase = self.entailment_id
lowerCAmelCase = -1 if entailment_id == 0 else 0
lowerCAmelCase = reshaped_outputs[..., [contradiction_id, entailment_id]]
lowerCAmelCase = np.exp(__lowerCAmelCase) / np.exp(__lowerCAmelCase).sum(-1 , keepdims=__lowerCAmelCase)
lowerCAmelCase = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
lowerCAmelCase = reshaped_outputs[..., self.entailment_id]
lowerCAmelCase = np.exp(__lowerCAmelCase) / np.exp(__lowerCAmelCase).sum(-1 , keepdims=__lowerCAmelCase)
lowerCAmelCase = list(reversed(scores[0].argsort()))
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
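# Sketch of the scoring math above with hypothetical logits: in the
# multi-label branch, each label gets a softmax over its own
# [contradiction, entailment] pair and the entailment column is kept.
demo_logits = np.array([[[-1.0, 0.5, 3.0]]])  # (num_sequences, n_labels=1, 3 classes)
demo_pair = demo_logits[..., [0, 2]]          # contradiction_id=0, entailment_id=2
demo_scores = np.exp(demo_pair) / np.exp(demo_pair).sum(-1, keepdims=True)
print(demo_scores[..., 1])                    # P(entailment) per label, ~[[0.982]]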
| 272 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def snake_case__ ( _A: Union[str, Any] , _A: Tuple , _A: Any=1e-12 ) -> str:
'''simple docstring'''
lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
return jnp.matmul(_A , norm_emb_a.T )
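# A self-contained sketch of the cosine-similarity helper above (the sample's
# original parameter names were lost to renaming, so readable local names are
# used here instead):
def cosine_sim_sketch(emb_a, emb_b, eps=1e-12):
    # Row-normalize both embedding matrices, then take pairwise dot products.
    a = emb_a / jnp.clip(jnp.linalg.norm(emb_a, axis=1, keepdims=True), a_min=eps)
    b = emb_b / jnp.clip(jnp.linalg.norm(emb_b, axis=1, keepdims=True), a_min=eps)
    return jnp.matmul(a, b.T)

# cosine_sim_sketch(jnp.eye(2), jnp.array([[3.0, 0.0]])) -> [[1.], [0.]]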
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : CLIPConfig
UpperCAmelCase_ : jnp.dtype = jnp.floataa
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config)
lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype)
lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim))
lowerCAmelCase = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim))
lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,))
lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,))
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1]
lowerCAmelCase = self.visual_projection(__lowerCAmelCase)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowerCAmelCase = 0.0
lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase)
# Use a lower threshold if an image has any special care concept
lowerCAmelCase = is_special_care * 0.01
lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
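# Sketch of the threshold adjustment above with hypothetical scores: any
# positive "special care" score lowers every concept threshold by 0.01.
demo_special_scores = jnp.array([[0.02, -0.3, -0.5]])
demo_is_special = jnp.any(demo_special_scores > 0, axis=1, keepdims=True)
print(demo_is_special * 0.01)  # [[0.01]]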
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = CLIPConfig
UpperCAmelCase_ : Any = '''clip_input'''
UpperCAmelCase_ : List[str] = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
if input_shape is None:
lowerCAmelCase = (1, 224, 224, 3)
lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase)
super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase)
lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng}
lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""]
return random_params
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1))
return self.module.apply(
{"""params""": params or self.params} , jnp.array(__lowerCAmelCase , dtype=jnp.floataa) , rngs={} , )
| 272 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 272 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class a__( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = MvpTokenizer
UpperCAmelCase_ : Optional[Any] = MvpTokenizerFast
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = filter_roberta_detectors
def a_ ( self):
"""simple docstring"""
super().setUp()
lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(__lowerCAmelCase))
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
@cached_property
def a_ ( self):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
# Test that special tokens are reset
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""")
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , __lowerCAmelCase)
self.assertIn("""attention_mask""" , __lowerCAmelCase)
self.assertNotIn("""labels""" , __lowerCAmelCase)
self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase)
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
def a_ ( self):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""")
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""")
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
# attention_mask should be 1 everywhere, so its average over the length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
# The Rust tokenizer correctly handles the space before the mask token, while the Python one doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
__lowercase = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
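# A minimal sketch of the lazy-import pattern relied on above (hypothetical
# class name; the real `_LazyModule` also handles __dir__, pickling and the
# module spec):
from types import ModuleType

class LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # Import the heavy submodule only on first attribute access.
                module = __import__(f"{self.__name__}.{submodule}", fromlist=[attr])
                return getattr(module, attr)
        raise AttributeError(attr)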
| 272 | '''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class a__( enum.Enum ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : Any = 2
@add_end_docstrings(lowerCAmelCase__ )
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase = None
if self.model.config.prefix is not None:
lowerCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params)
lowerCAmelCase = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase = {**self._forward_params, **forward_params}
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = {}
if prefix is not None:
lowerCAmelCase = prefix
if prefix:
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
""" [None, 'hole']""")
lowerCAmelCase = handle_long_generation
preprocess_params.update(__lowerCAmelCase)
lowerCAmelCase = generate_kwargs
lowerCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
if len(__lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
lowerCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase)
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
# Remove the padding prompt from the sequence if an XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
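# Sketch of the "hole" length bookkeeping in preprocess() above, with
# hypothetical numbers:
demo_cur_len, demo_new_tokens, demo_max_length = 900, 200, 1024
demo_keep_length = demo_max_length - demo_new_tokens  # 824
# input_ids[:, -demo_keep_length:] keeps only the most recent 824 prompt
# tokens, so the prompt plus 200 generated tokens fits the 1024-token window.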
| 272 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | '''simple docstring'''
def snake_case__ ( _A: str ) -> list[int]:
'''simple docstring'''
lowerCAmelCase = [0 for i in range(len(_A ) )]
# initialize interval's left pointer and right pointer
lowerCAmelCase , lowerCAmelCase = 0, 0
for i in range(1 , len(_A ) ):
# case when current index is inside the interval
if i <= right_pointer:
lowerCAmelCase = min(right_pointer - i + 1 , z_result[i - left_pointer] )
lowerCAmelCase = min_edge
while go_next(_A , _A , _A ):
z_result[i] += 1
# if the new index's result extends the interval further to the right,
# we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
lowerCAmelCase , lowerCAmelCase = i, i + z_result[i] - 1
return z_result
def snake_case__ ( _A: int , _A: list[int] , _A: str ) -> bool:
'''simple docstring'''
return i + z_result[i] < len(_A ) and s[z_result[i]] == s[i + z_result[i]]
def snake_case__ ( _A: str , _A: str ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with the concatenated string
lowerCAmelCase = z_function(pattern + input_str )
for val in z_result:
# if the value is greater than or equal to the length of the pattern string,
# this index is the starting position of a substring
# equal to the pattern string
if val >= len(_A ):
answer += 1
return answer
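# A self-contained sketch of the Z-function above (the helpers in this sample
# all share one rewritten name, so a hypothetical local `z_of` is used):
def z_of(s: str) -> list[int]:
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:
            left, right = i, i + z[i] - 1
    return z

assert z_of("aaabaab") == [0, 2, 1, 0, 2, 1, 0]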
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | 1 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def snake_case__ ( _A: Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = int(_A )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = t // 3600, (t // 60) % 60, t % 60
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def snake_case__ ( _A: Any , _A: Dict , _A: int , _A: Dict , _A: Dict=300 ) -> Optional[Any]:
'''simple docstring'''
return f"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def snake_case__ ( _A: Union[str, Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
lowerCAmelCase = f"{elt:.6f}" if isinstance(_A , _A ) else str(_A )
html_code += f" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class a__:
'''simple docstring'''
UpperCAmelCase_ : List[Any] = 5
UpperCAmelCase_ : str = 0.2
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = 300 , ):
"""simple docstring"""
lowerCAmelCase = total
lowerCAmelCase = """""" if prefix is None else prefix
lowerCAmelCase = leave
lowerCAmelCase = parent
lowerCAmelCase = width
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = value
if comment is not None:
lowerCAmelCase = comment
if self.last_value is None:
lowerCAmelCase = lowerCAmelCase = time.time()
lowerCAmelCase = lowerCAmelCase = value
lowerCAmelCase = lowerCAmelCase = None
lowerCAmelCase = self.warmup
lowerCAmelCase = 1
self.update_bar(__lowerCAmelCase)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total):
if self.first_calls > 0:
self.first_calls -= 1
lowerCAmelCase = time.time()
lowerCAmelCase = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
lowerCAmelCase = self.elapsed_time / (value - self.start_value)
else:
lowerCAmelCase = None
if value >= self.total:
lowerCAmelCase = self.total
lowerCAmelCase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
lowerCAmelCase = self.average_time_per_item * (self.total - value)
self.update_bar(__lowerCAmelCase)
lowerCAmelCase = value
lowerCAmelCase = current_time
if self.average_time_per_item is None:
lowerCAmelCase = 1
else:
lowerCAmelCase = max(int(self.update_every / self.average_time_per_item) , 1)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=None):
"""simple docstring"""
lowerCAmelCase = """ """ * (len(str(self.total)) - len(str(__lowerCAmelCase))) + str(__lowerCAmelCase)
if self.elapsed_time is None:
lowerCAmelCase = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
lowerCAmelCase = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
else:
lowerCAmelCase = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
f" {format_time(self.predicted_remaining)}"
)
self.label += f", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
self.display()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
lowerCAmelCase = disp.display(disp.HTML(self.html_code) , display_id=__lowerCAmelCase)
else:
self.output.update(disp.HTML(self.html_code))
def a_ ( self):
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""""""))
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=None):
"""simple docstring"""
super().__init__(__lowerCAmelCase)
lowerCAmelCase = None if column_names is None else [column_names]
lowerCAmelCase = None
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
lowerCAmelCase = disp.display(disp.HTML(self.html_code) , display_id=__lowerCAmelCase)
else:
self.output.update(disp.HTML(self.html_code))
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
if self.inner_table is None:
lowerCAmelCase = [list(values.keys()), list(values.values())]
else:
lowerCAmelCase = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__lowerCAmelCase)
lowerCAmelCase = columns
self.inner_table.append([values[c] for c in columns])
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=300):
"""simple docstring"""
lowerCAmelCase = NotebookProgressBar(__lowerCAmelCase , prefix=__lowerCAmelCase , parent=self , width=__lowerCAmelCase)
return self.child_bar
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = None
self.display()
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self):
"""simple docstring"""
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = False
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = [self.first_column] + ["""Training Loss"""]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("""Validation Loss""")
lowerCAmelCase = NotebookTrainingTracker(state.max_steps , __lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1 , comment=f"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , )
lowerCAmelCase = False
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
if not has_length(__lowerCAmelCase):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
lowerCAmelCase = self.training_tracker.add_child(len(__lowerCAmelCase))
else:
lowerCAmelCase = NotebookProgressBar(len(__lowerCAmelCase))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
lowerCAmelCase = None
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
lowerCAmelCase = {"""Training Loss""": logs["""loss"""]}
# The first column is necessarily Step since we're not using the epoch eval strategy
lowerCAmelCase = state.global_step
self.training_tracker.write_line(__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
if self.training_tracker is not None:
lowerCAmelCase = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
for log in reversed(state.log_history):
if "loss" in log:
lowerCAmelCase = log["""loss"""]
break
if self.first_column == "Epoch":
lowerCAmelCase = int(state.epoch)
else:
lowerCAmelCase = state.global_step
lowerCAmelCase = """eval"""
for k in metrics:
if k.endswith("""_loss"""):
lowerCAmelCase = re.sub(r"""\_loss$""" , """""" , __lowerCAmelCase)
lowerCAmelCase = metrics.pop("""total_flos""" , __lowerCAmelCase)
lowerCAmelCase = metrics.pop("""epoch""" , __lowerCAmelCase)
lowerCAmelCase = metrics.pop(f"{metric_key_prefix}_runtime" , __lowerCAmelCase)
lowerCAmelCase = metrics.pop(f"{metric_key_prefix}_samples_per_second" , __lowerCAmelCase)
lowerCAmelCase = metrics.pop(f"{metric_key_prefix}_steps_per_second" , __lowerCAmelCase)
lowerCAmelCase = metrics.pop(f"{metric_key_prefix}_jit_compilation_time" , __lowerCAmelCase)
for k, v in metrics.items():
if k == f"{metric_key_prefix}_loss":
lowerCAmelCase = v
else:
lowerCAmelCase = k.split("""_""")
lowerCAmelCase = """ """.join([part.capitalize() for part in splits[1:]])
lowerCAmelCase = v
self.training_tracker.write_line(__lowerCAmelCase)
self.training_tracker.remove_child()
lowerCAmelCase = None
# Evaluation takes a long time so we should force the next update.
lowerCAmelCase = True
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
self.training_tracker.update(
state.global_step , comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}" , force_update=__lowerCAmelCase)
lowerCAmelCase = None
| 272 | '''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : str = '''EncodecFeatureExtractor'''
UpperCAmelCase_ : Dict = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.feature_extractor
lowerCAmelCase = False
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase)
def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
if text is not None:
lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase)
if audio is not None:
lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
lowerCAmelCase = audio_inputs["""padding_mask"""]
return inputs
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase)
lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase)
else:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = to_numpy(__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape
if padding_mask is None:
return list(__lowerCAmelCase)
lowerCAmelCase = to_numpy(__lowerCAmelCase)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase = seq_len - padding_mask.shape[-1]
lowerCAmelCase = 1 - self.feature_extractor.padding_value
lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase)
lowerCAmelCase = audio_values.tolist()
for i in range(__lowerCAmelCase):
lowerCAmelCase = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1)
return audio_values
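# Sketch of the padding-mask slicing above with hypothetical shapes and a
# padding value of 0:
demo_audio = np.arange(6, dtype=np.float32).reshape(1, 1, 6)  # (bsz, channels, seq_len)
demo_mask = np.array([[1, 1, 1, 1, 0, 0]])                    # zeros mark padding
demo_kept = np.asarray(demo_audio[0])[demo_mask[0][None, :] != 0]
print(demo_kept.reshape(1, -1))  # [[0. 1. 2. 3.]]: padded samples dropped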
| 272 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class a__( unittest.TestCase ):
'''simple docstring'''
    def setUp( self):
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="""utf-8""" , check=True , )
        assert hasattr(self , """env""")
    def create_estimator( self , instance_count):
        """simple docstring"""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="""py36""" , )
    def save_results_as_csv( self , job_name):
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script( self , instance_count):
        """simple docstring"""
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json" , """w""") as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , outfile)
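# Added, hedged companion sketch: TrainingJobAnalytics.dataframe() exposes timestamp /
# metric_name / value columns, and export_csv above writes that same frame, so the dumped
# metrics can be inspected offline roughly like this (the file name is illustrative).
if __name__ == "__main__":
    import pandas as pd

    df = pd.read_csv("some-job-name_metrics.csv")  # hypothetical export from save_results_as_csv
    eval_accuracy = df[df.metric_name == "eval_accuracy"]["value"].tolist()
    print(eval_accuracy)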
| 272 | '''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__( unittest.TestCase ):
    '''simple docstring'''
    @property
    def dummy_uncond_unet( self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
    def test_inference( self):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""" , return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
    def test_inference_cifar10( self):
        """simple docstring"""
        model_id = """google/ddpm-cifar10-32"""
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator , output_type="""numpy""").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
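# Added usage sketch mirroring the slow test above: wrap a pretrained UNet in a
# PNDMPipeline and sample a single CIFAR-10-sized image (step count is illustrative).
if __name__ == "__main__":
    unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
    pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
    image = pipe(generator=torch.manual_seed(0), num_inference_steps=50, output_type="numpy").images
    print(image.shape)  # (1, 32, 32, 3)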
| 272 | 1 |
'''simple docstring'''
def binary_insertion_sort( collection: list ) -> list:
    '''simple docstring'''
    for i in range(1 , len(collection ) ):
        val = collection[i]
        low = 0
        high = i - 1
        # binary-search the insertion point for val in the already-sorted prefix
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift collection[low:i] one slot to the right and insert val
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(binary_insertion_sort(unsorted))
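# Added property check: the sort above should always agree with Python's built-in sorted().
if __name__ == "__main__":
    import random

    for _ in range(100):
        sample = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert binary_insertion_sort(list(sample)) == sorted(sample)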
| 272 | '''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def snake_case__ ( sentence: str ) -> str:
    '''simple docstring'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
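# Added check: for ASCII input the table lookup above upper-cases only the first character,
# unlike str.capitalize(), which also lower-cases the rest of the string.
if __name__ == "__main__":
    assert snake_case__("""hello World""") == """Hello World"""
    assert """hello World""".capitalize() == """Hello world"""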
| 272 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {"""shortest_edge""": 18}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self):
        """simple docstring"""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class a__( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self):
        """simple docstring"""
        self.image_processor_tester = LevitImageProcessingTester(self)
    @property
    def image_processor_dict( self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , """image_mean"""))
        self.assertTrue(hasattr(image_processing , """image_std"""))
        self.assertTrue(hasattr(image_processing , """do_normalize"""))
        self.assertTrue(hasattr(image_processing , """do_resize"""))
        self.assertTrue(hasattr(image_processing , """do_center_crop"""))
        self.assertTrue(hasattr(image_processing , """size"""))
    def test_image_processor_from_dict_with_kwargs( self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18})
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42})
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84})
    def test_batch_feature( self):
        """simple docstring"""
        pass
    def test_call_pil( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def test_call_numpy( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def test_call_pytorch( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
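# Added, hedged usage sketch: the same processor outside the test harness (requires the
# vision extras). The checkpoint name is an assumption for illustration; any LeViT
# checkpoint with a processor config should work.
if __name__ == "__main__":
    processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
    dummy = Image.fromarray(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
    pixel_values = processor(dummy, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # e.g. torch.Size([1, 3, 224, 224])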
| 272 | '''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt
        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
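# Added usage sketch: a tiny read loop on top of get_character(). Arrow keys come back as
# single characters with ARROW_KEY_FLAG folded into their code point, so they can be
# compared against KEYMAP entries; "q" is an illustrative quit key.
if __name__ == "__main__":
    while True:
        char = get_character()
        if not isinstance(char, str) or char == "q":
            break
        if ord(char) == KEYMAP["up"]:
            print("arrow up")
        else:
            print(repr(char))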
| 272 | 1 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCAmelCase__ = "CompVis/stable-diffusion-v1-1"
UpperCAmelCase__ = "CompVis/stable-diffusion-v1-2"
UpperCAmelCase__ = "CompVis/stable-diffusion-v1-3"
UpperCAmelCase__ = "CompVis/stable-diffusion-v1-4"
class lowercase_ ( lowercase ):
'''simple docstring'''
    def __init__( self , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , requires_safety_checker : bool = True , ):
        """simple docstring"""
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
@property
    def layers( self ) ->Dict[str, Any]:
        """simple docstring"""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        """simple docstring"""
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
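# Added, hedged usage sketch: community pipelines like this one are normally loaded through
# DiffusionPipeline.from_pretrained with the custom_pipeline argument; the pipeline name
# below is an assumption for illustration.
if __name__ == "__main__":
    pipe = DiffusionPipeline.from_pretrained(
        pipe4_model_id, custom_pipeline="stable_diffusion_comparison"
    )
    output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
    print(len(output.images))  # one image per checkpoint variant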
| 0 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class a__( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ['''input_features''']
    def __init__( self , feature_size=80 , sampling_rate=16000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
    def _np_extract_fbank_features( self , waveform):
        """simple docstring"""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32)
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values
    def __call__( self , raw_speech , truncation = True , pad_to_multiple_of = None , return_tensors = None , return_attention_mask = None , padding = "max_length" , max_length = None , sampling_rate = None , do_normalize = None , **kwargs , ):
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""")
        is_batched_numpy = isinstance(raw_speech , np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray):
            raw_speech = np.asarray(raw_speech , dtype=np.float32)
        elif isinstance(raw_speech , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"""input_features""": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["""input_features"""] = self.zero_mean_unit_var_norm(
                padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
            padded_inputs["""input_features"""] = np.stack(padded_inputs["""input_features"""] , axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0] , List):
            padded_inputs["""input_features"""] = [np.asarray(feature , dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["""input_features"""] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["""attention_mask"""] = padded_inputs["""attention_mask"""][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict( self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["""feature_extractor_type"""] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
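# Added numpy sketch of the two normalizations above, on dummy data: the log-mel clamp
# limits dynamic range to 8 below the per-example max, and the zero-mean/unit-variance
# step uses statistics from the unpadded prefix only.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    log_spec = np.log10(np.maximum(rng.random((80, 10)), 1e-10))
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # clamp dynamic range
    log_spec = (log_spec + 4.0) / 4.0                      # rescale

    x = np.array([0.5, -0.5, 0.25, 0.0, 0.0])  # last two samples are padding
    length = 3
    normed = (x - x[:length].mean()) / np.sqrt(x[:length].var() + 1e-7)
    normed[length:] = 0.0                      # padding_value
    print(log_spec.shape, normed)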
| 272 | 0 |