code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=16 , _UpperCAmelCase=[1, 2, 1] , _UpperCAmelCase=[2, 2, 4] , _UpperCAmelCase=2 , _UpperCAmelCase=2.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=8 , ):
'''simple docstring'''
__A : str = parent
__A : int = batch_size
__A : Optional[Any] = image_size
__A : Dict = patch_size
__A : List[str] = num_channels
__A : List[str] = embed_dim
__A : Any = depths
__A : str = num_heads
__A : List[str] = window_size
__A : Union[str, Any] = mlp_ratio
__A : Union[str, Any] = qkv_bias
__A : int = hidden_dropout_prob
__A : Any = attention_probs_dropout_prob
__A : Optional[int] = drop_path_rate
__A : List[Any] = hidden_act
__A : Optional[Any] = use_absolute_embeddings
__A : Optional[int] = patch_norm
__A : List[Any] = layer_norm_eps
__A : int = initializer_range
__A : str = is_training
__A : Dict = scope
__A : Dict = use_labels
__A : int = type_sequence_label_size
__A : Union[str, Any] = encoder_stride
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Union[str, Any] = SwinvaModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : int = model(_UpperCAmelCase)
__A : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
__A : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = SwinvaForMaskedImageModeling(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Tuple = model(_UpperCAmelCase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__A : Dict = 1
__A : Any = SwinvaForMaskedImageModeling(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__A : List[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = self.type_sequence_label_size
__A : Union[str, Any] = SwinvaForImageClassification(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = SwinvaModelTester(self)
__A : Any = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : str = model_class(_UpperCAmelCase)
__A : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : List[Any] = [*signature.parameters.keys()]
__A : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[Any] = True
for model_class in self.all_model_classes:
__A : Optional[Any] = True
__A : int = False
__A : List[Any] = True
__A : List[Any] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Any = outputs.attentions
__A : Any = len(self.model_tester.depths)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : str = True
__A : Union[str, Any] = config.window_size**2
__A : List[Any] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : int = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : str = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__A : int = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : int = True
__A : List[Any] = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
if hasattr(self.model_tester , 'num_hidden_states_types'):
__A : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__A : str = 2
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Tuple = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Tuple = outputs.hidden_states
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# Swinv2 has a different seq_length
__A : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__A : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
__A : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A ,__A ,__A ,__A : Any = reshaped_hidden_states[0].shape
__A : Tuple = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : int = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__A : int = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = 3
__A : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
__A : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__A : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__A : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__A : Any = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Any = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : str = SwinvaModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = _config_zero_init(_UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = model_class(config=_UpperCAmelCase)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
__A : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : Tuple = model(**_UpperCAmelCase)
# verify the logits
__A : Optional[int] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Optional[Any] = torch.tensor([-0.3947, -0.4306, 0.0026]).to(_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4)) | 8 |
'''simple docstring'''
from collections import namedtuple
__A : Union[str, Any] = namedtuple('from_to', 'from_ to')
__A : str = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 10_00),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00454, 264.172),
'cubicyard': from_to(0.76455, 1.30795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.000236588, 4226.75),
}
def lowerCAmelCase_ ( a : float , a : str , a : str ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ', '.join(a ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ', '.join(a ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 394 | 0 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __lt__( self: List[str] , a: Optional[Any] ):
return self[-1] < other[-1]
def __eq__( self: Dict , a: Optional[int] ):
return self[-1] == other[-1]
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : list[Stack] = []
# sort into stacks
for element in collection:
__lowerCamelCase : Union[str, Any] = Stack([element] )
__lowerCamelCase : Dict = bisect_left(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if i != len(SCREAMING_SNAKE_CASE__ ):
stacks[i].append(SCREAMING_SNAKE_CASE__ )
else:
stacks.append(SCREAMING_SNAKE_CASE__ )
# use a heap-based merge to merge stack efficiently
__lowerCamelCase : Any = merge(*(reversed(SCREAMING_SNAKE_CASE__ ) for stack in stacks) )
return collection
if __name__ == "__main__":
lowercase_ = input('Enter numbers separated by a comma:\n').strip()
lowercase_ = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 230 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 230 | 1 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
try:
_lowerCAmelCase : str = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_lowerCAmelCase : List[Any] = default
else:
# KEY is set, convert it to True or False.
try:
_lowerCAmelCase : Optional[Any] = strtobool(SCREAMING_SNAKE_CASE_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
_lowerCAmelCase = parse_flag_from_env("""RUN_SLOW""", default=False)
_lowerCAmelCase = parse_flag_from_env("""RUN_REMOTE""", default=False)
_lowerCAmelCase = parse_flag_from_env("""RUN_LOCAL""", default=True)
_lowerCAmelCase = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
_lowerCAmelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
_lowerCAmelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
_lowerCAmelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
_lowerCAmelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; """,
)
# Beam
_lowerCAmelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
_lowerCAmelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
_lowerCAmelCase = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
_lowerCAmelCase : int = unittest.skip('test requires faiss' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
_lowerCAmelCase : Optional[int] = unittest.skip('test requires regex' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
_lowerCAmelCase : List[str] = unittest.skip('test requires elasticsearch' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
_lowerCAmelCase : Tuple = unittest.skip('test requires sqlalchemy' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
_lowerCAmelCase : Optional[Any] = unittest.skip('test requires PyTorch' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not config.TF_AVAILABLE:
_lowerCAmelCase : Tuple = unittest.skip('test requires TensorFlow' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
_lowerCAmelCase : Any = unittest.skip('test requires JAX' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
_lowerCAmelCase : Optional[int] = unittest.skip('test requires Pillow' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
def _require_spacy_model(_lowerCamelCase ):
try:
import spacy # noqa F401
spacy.load(SCREAMING_SNAKE_CASE_ )
except ImportError:
return unittest.skip('test requires spacy' )(SCREAMING_SNAKE_CASE_ )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(SCREAMING_SNAKE_CASE_ ) )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
return _require_spacy_model
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
_lowerCAmelCase : Optional[Any] = unittest.skip('test is slow' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
_lowerCAmelCase : List[str] = unittest.skip('test is local' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
_lowerCAmelCase : List[Any] = unittest.skip('test is packaged' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
_lowerCAmelCase : Optional[Any] = unittest.skip('test requires remote' )(SCREAMING_SNAKE_CASE_ )
return test_case
def lowerCamelCase__ ( *_lowerCamelCase ):
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(SCREAMING_SNAKE_CASE_ ) and name.startswith('test' ):
for decorator in decorators:
_lowerCAmelCase : List[str] = decorator(SCREAMING_SNAKE_CASE_ )
setattr(cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return cls
return decorate
class __UpperCamelCase ( a_ ):
pass
class __UpperCamelCase ( a_ ):
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = 2
@contextmanager
def lowerCamelCase__ ( _lowerCamelCase=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase=1e-16 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = requests.Session().request
def timeout_request(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
# Change the url to an invalid url so that the connection hangs
_lowerCAmelCase : Optional[Any] = '''https://10.255.255.1'''
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_lowerCAmelCase : Optional[Any] = timeout
try:
return online_request(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_lowerCAmelCase : Tuple = url
_lowerCAmelCase : str = e.args[0]
_lowerCAmelCase : Optional[int] = (max_retry_error.args[0].replace('10.255.255.1' , f"""OfflineMock[{url}]""" ),)
_lowerCAmelCase : str = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
raise requests.ConnectionError('Offline mode is enabled.' , request=SCREAMING_SNAKE_CASE_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , SCREAMING_SNAKE_CASE_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , SCREAMING_SNAKE_CASE_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE_ ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def lowerCamelCase__ ( *_lowerCamelCase , **_lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) as tmp_dir:
try:
os.chdir(SCREAMING_SNAKE_CASE_ )
yield
finally:
os.chdir(SCREAMING_SNAKE_CASE_ )
@contextmanager
def lowerCamelCase__ ( ):
'''simple docstring'''
import gc
gc.collect()
_lowerCAmelCase : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCamelCase__ ( ):
'''simple docstring'''
import gc
gc.collect()
_lowerCAmelCase : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist() == deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist()
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
try:
return func(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
except HTTPError as err:
if str(SCREAMING_SNAKE_CASE_ ).startswith('500' ) or str(SCREAMING_SNAKE_CASE_ ).startswith('502' ):
pytest.xfail(str(SCREAMING_SNAKE_CASE_ ) )
raise err
return decorator.decorator(_wrapper , SCREAMING_SNAKE_CASE_ )
class __UpperCamelCase :
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = returncode
_lowerCAmelCase : Tuple = stdout
_lowerCAmelCase : Dict = stderr
async def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
while True:
_lowerCAmelCase : str = await stream.readline()
if line:
callback(SCREAMING_SNAKE_CASE_ )
else:
break
async def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=False , _lowerCamelCase=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(SCREAMING_SNAKE_CASE_ ) )
_lowerCAmelCase : Optional[int] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=SCREAMING_SNAKE_CASE_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=SCREAMING_SNAKE_CASE_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[Any] = []
def tee(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="" ):
_lowerCAmelCase : Tuple = line.decode('utf-8' ).rstrip()
sink.append(SCREAMING_SNAKE_CASE_ )
if not quiet:
print(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , file=SCREAMING_SNAKE_CASE_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda _lowerCamelCase : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stderr , label='stderr:' ) ),
] , timeout=SCREAMING_SNAKE_CASE_ , )
return _RunOutput(await p.wait() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=180 , _lowerCamelCase=False , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = asyncio.get_event_loop()
_lowerCAmelCase : Union[str, Any] = loop.run_until_complete(
_stream_subprocess(SCREAMING_SNAKE_CASE_ , env=SCREAMING_SNAKE_CASE_ , stdin=SCREAMING_SNAKE_CASE_ , timeout=SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ , echo=SCREAMING_SNAKE_CASE_ ) )
_lowerCAmelCase : Any = ''' '''.join(SCREAMING_SNAKE_CASE_ )
if result.returncode > 0:
_lowerCAmelCase : Optional[int] = '''\n'''.join(result.stderr )
raise RuntimeError(
f"""\'{cmd_str}\' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""\'{cmd_str}\' produced no output.""" )
return result
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
_lowerCAmelCase : Optional[int] = re.sub(R'^gw' , '' , SCREAMING_SNAKE_CASE_ , 0 , re.M )
return int(SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 29500
_lowerCAmelCase : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 259 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure consumed by `_LazyModule`: maps each submodule to the
# public names it exports.  Optional branches register extra entries only when
# their dependencies are installed.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

# Vision objects require the optional image dependencies (PIL etc.).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

# Modeling objects require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime we use the lazy proxy.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 434 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
# Metric metadata consumed by the class decorator below (add_start_docstrings)
# and by `MetricInfo` in `_info`.
_CITATION = "\\n\n"

_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """Perplexity metric: exponentiated mean negative log-likelihood of each
    input text under a causal language model loaded from the Hub."""

    def _info(self):
        # Declares the metric's input schema for the `datasets` framework.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        """Compute per-text perplexities and their mean.

        Args:
            input_texts: list of strings to score.
            model_id: Hub checkpoint name of a causal language model.
            batch_size: number of texts per forward pass.
            add_start_token: prepend the model's BOS token so the first word
                is also scored (requires the tokenizer to define a BOS token).
            device: "cuda", "gpu" (alias for cuda) or "cpu"; auto-detected when None.

        Returns:
            dict with "perplexities" (list of floats) and "mean_perplexity".
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss is a natural-log NLL, so perplexity is exp(mean NLL).
            # (The mangled original called a base-2 exponential, which is wrong
            # for an ln-based loss.)
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 717 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Tokens are split on any character that is not alphanumeric or underscore.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10  # files with fewer tokens are skipped when MinHashing
NUM_PERM = 256  # number of MinHash permutations
def get_min_hash(tokens):
    """Compute the MinHash of a list of tokens.

    Returns None for files shorter than MIN_NUM_TOKENS, which are too small to
    deduplicate meaningfully.
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    # hash the *set* of tokens: order and multiplicity are irrelevant to Jaccard
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Return the set of non-empty alphanumeric tokens in a code string."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """Incremental near-duplicate index over MinHashes backed by an LSH structure.

    Files whose estimated Jaccard similarity exceeds
    `duplication_jaccard_threshold` are grouped into clusters keyed by the
    first file that reached the corresponding LSH bucket.
    """

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # cluster base key -> set of near-duplicate keys
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash):
        """Insert (code_key, min_hash), attaching the key to an existing cluster
        when near-duplicates are already indexed."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # no near-duplicate is a cluster base yet: start a new cluster
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        """Return clusters as lists of {"base_index", "repo_name", "path"} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath):
        """Dump all duplicate clusters as JSON to `filepath`."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Pool worker: MinHash one (index, row) pair.

    Returns ((index, repo_name, path), MinHash), or None when the file is too
    short to hash (see `get_min_hash`).
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """Yield (filename-tuple, MinHash) pairs computed in parallel over a dataset iterator."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Index every file of the dataset and return its near-duplicate clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(codea, codeb):
    """Exact Jaccard similarity between the token sets of two code strings.

    Bug fixed: the mangled original intersected/unioned a set with itself,
    which always evaluated to 1.0 regardless of the inputs.
    """
    tokensa = get_tokens(codea)
    tokensb = get_tokens(codeb)
    return len(tokensa & tokensb) / len(tokensa | tokensb)
# Module-level dataset handle shared with multiprocessing workers via fork
# (assigned in find_extremes, read in _find_cluster_extremes_shared).
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one cluster to its "extremes": a minimal covering subset.

    An element is kept only when it is not within `jaccard_threshold` of an
    already-kept extreme; otherwise that extreme's `copies` count is bumped.
    Reads file contents through the module-global `_shared_dataset`
    (shared with the parent process via fork to avoid pickling the dataset).
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            # no existing extreme covers element1: it becomes one itself
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every cluster in parallel.

    Returns a list with one extremes-list per input cluster (unordered, as
    produced by `imap_unordered`).
    """
    global _shared_dataset
    # share the dataset with workers through fork instead of pickling it
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def UpperCamelCase(dataset, jaccard_threshold=0.85):
    """Deduplicate `dataset`, keeping one representative ("extreme") per near-duplicate group.

    Args:
        dataset: a `datasets.Dataset` with "content", "repo_name" and "path" columns.
        jaccard_threshold: similarity above which two files count as duplicates.

    Returns:
        (filtered_dataset, duplicate_clusters) — cluster elements are annotated
        in place with `is_extreme` and, for kept extremes, their `copies` count.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element

    # drop every duplicate that is not a kept extreme
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
| 633 | 0 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

# BibTeX citation for the BLEURT paper.
_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
"""

# Released BLEURT checkpoints; keys are the accepted metric config names.
CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCAmelCase(datasets.Metric):
    """BLEURT metric: thin wrapper around the google-research `bleurt` scorer."""

    def _info(self):
        # Declares the metric's input schema for the `datasets` framework.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        """Resolve the configured checkpoint, download it, and build the scorer."""
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        # accept either lower- or upper-case spellings of the checkpoint names
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        """Score each prediction against its reference; returns {"scores": [...]}."""
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 238 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
# Module-level logger (identifier mangled to `A` in this file).
A : List[str] = get_logger(__name__)
class __lowerCamelCase :
    """Mock download manager that serves pre-packaged "dummy data" to dataset tests.

    It mirrors the real DownloadManager API (download_and_extract, iter_archive,
    iter_files, ...) but resolves every URL to files inside a local or
    GitHub-hosted ``dummy_data.zip`` archive, so tests never touch the network.

    NOTE(review): identifiers in this file were mechanically mangled — the three
    class attributes below all share the name ``a`` (only the last survives) and
    every method is named ``A``; methods reference the *original* names
    (``dummy_file``, ``download_dummy_data``, ...) which no longer exist here.
    Several signatures also repeat the parameter name ``SCREAMING_SNAKE_CASE``,
    which is not valid Python. Left byte-identical deliberately — reconcile with
    the upstream ``datasets`` MockDownloadManager before executing.
    """

    # NOTE(review): three assignments to one name; only the last (`False`) persists.
    a = "dummy_data"
    a = "datasets"
    a = False

    # Stores configuration; presumably (dataset_name, cache_dir, version, config, ...)
    # judging by the attribute names used in the body — TODO confirm.
    def __init__( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[Version, str] , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[List[Callable]] = None , ):
        _A : Dict = 0
        _A : Dict = dataset_name
        _A : Any = cache_dir
        _A : List[str] = use_local_dummy_data
        _A : Optional[Any] = config
        # download_callbacks take a single url as input
        _A : List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        _A : int = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        _A : Any = str(SCREAMING_SNAKE_CASE)
        # to be downloaded
        _A : Optional[Any] = None
        _A : List[str] = None

    # Lazily downloads/extracts the dummy archive on first access and caches it.
    @property
    def A ( self : str):
        if self._dummy_file is None:
            _A : Tuple = self.download_dummy_data()
        return self._dummy_file

    # Relative folder of the dummy data inside the repo:
    # dummy/<config_name>/<version> when a config exists, else dummy/<version>.
    @property
    def A ( self : Optional[Any]):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy' , self.config.name , self.version_name)
        # structure is dummy / version_name
        return os.path.join('dummy' , self.version_name)

    # Path of the dummy zip file inside the dummy-data folder.
    @property
    def A ( self : Tuple):
        return os.path.join(self.dummy_data_folder , 'dummy_data.zip')

    # Fetch (via cached_path) and extract the dummy archive, locally or from GitHub.
    def A ( self : int):
        _A : Dict = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        _A : Optional[int] = cached_path(
            SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE , force_extract=SCREAMING_SNAKE_CASE)
        return os.path.join(SCREAMING_SNAKE_CASE , self.dummy_file_name)

    # Location of the dummy zip within the local datasets-scripts checkout.
    @property
    def A ( self : List[str]):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)

    # GitHub raw URL of the dummy zip, computed once and cached.
    @property
    def A ( self : str):
        if self._bucket_url is None:
            _A : Tuple = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/'))
        return self._bucket_url

    # Root folder of the extracted dummy data (parent dir when dummy_file is a file).
    @property
    def A ( self : str):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '/').split('/')[:-1])

    # Main entry point: map a data_url (dict / list / single url) to dummy paths.
    def A ( self : List[str] , SCREAMING_SNAKE_CASE : List[Any] , *SCREAMING_SNAKE_CASE : Optional[Any]):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            _A : Any = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            _A : Dict = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
            return self.create_dummy_data_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
        elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple)):
            return self.create_dummy_data_list(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
        else:
            return self.create_dummy_data_single(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)

    # download_and_extract equivalent for the mock: just resolve to dummy paths.
    def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : str , *SCREAMING_SNAKE_CASE : str):
        return self.download_and_extract(SCREAMING_SNAKE_CASE)

    # download_custom equivalent; the custom-download callable is ignored.
    def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple):
        return self.download_and_extract(SCREAMING_SNAKE_CASE)

    # extract(): dummy data is already extracted, so the path is returned as-is.
    def A ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any):
        return path

    # get_recorded_sizes_checksums(): nothing is recorded for dummy data.
    def A ( self : str):
        return {}

    # Map a dict of urls (possibly dict of lists) to paths inside the dummy folder.
    def A ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]):
        _A : List[str] = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
                    for single_url in single_urls:
                        download_callback(SCREAMING_SNAKE_CASE)
                else:
                    _A : Optional[Any] = single_urls
                    download_callback(SCREAMING_SNAKE_CASE)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
                _A : List[Any] = [os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE).name)) for x in single_urls]
            else:
                _A : str = single_urls
                _A : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE).name))
            _A : Tuple = value

        # make sure that values are unique
        if all(isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            _A : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    # Map a list of urls to dummy paths; sharded tfrecord/pubmed urls collapse to one file.
    def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int):
        _A : List[str] = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        _A : Union[str, Any] = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , SCREAMING_SNAKE_CASE)) for url in data_url)
        _A : Optional[Any] = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            _A : Optional[Any] = [data_url[0]] * len(SCREAMING_SNAKE_CASE)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(SCREAMING_SNAKE_CASE)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            _A : Any = os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split('/')[-1]))
            dummy_data_list.append(SCREAMING_SNAKE_CASE)
        return dummy_data_list

    # Map a single url to one dummy path.
    def A ( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any):
        for download_callback in self.download_callbacks:
            download_callback(SCREAMING_SNAKE_CASE)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        _A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split('/')[-1]))
        if os.path.exists(SCREAMING_SNAKE_CASE) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    # delete_extracted_files(): no-op for dummy data.
    def A ( self : str):
        pass

    # manage_extracted_files(): no-op for dummy data.
    def A ( self : str):
        pass

    # iter_archive(): yield (relative-posix-path, file-object) pairs from the dummy zip.
    def A ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any]):
        def _iter_archive_members(SCREAMING_SNAKE_CASE : str):
            # this preserves the order of the members inside the ZIP archive
            _A : Dict = Path(self.dummy_file).parent
            _A : Union[str, Any] = path.relative_to(SCREAMING_SNAKE_CASE)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                _A : Any = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(SCREAMING_SNAKE_CASE)

        _A : str = Path(SCREAMING_SNAKE_CASE)
        _A : Any = _iter_archive_members(SCREAMING_SNAKE_CASE) if self.use_local_dummy_data else path.rglob('*')
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__')):
                yield file_path.relative_to(SCREAMING_SNAKE_CASE).as_posix(), file_path.open('rb')

    # iter_files(): walk the given path(s), skipping hidden/dunder files and dirs.
    def A ( self : int , SCREAMING_SNAKE_CASE : Tuple):
        if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
            _A : Tuple = [paths]
        for path in paths:
            if os.path.isfile(SCREAMING_SNAKE_CASE):
                if os.path.basename(SCREAMING_SNAKE_CASE).startswith(('.', '__')):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(SCREAMING_SNAKE_CASE):
                    if os.path.basename(SCREAMING_SNAKE_CASE).startswith(('.', '__')):
                        continue
                    dirnames.sort()
                    for filename in sorted(SCREAMING_SNAKE_CASE):
                        if filename.startswith(('.', '__')):
                            continue
                        yield os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
| 128 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure consumed by `_LazyModule`: submodule -> exported names.
# Optional branches below register additional entries only when their
# dependencies (tokenizers / torch / tensorflow) are installed.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime we use the lazy proxy.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download and return the demo image (as RGB PIL image) used to sanity-check
    converted InstructBLIP models."""
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    # stream=True lets PIL read straight from the response body without buffering it all
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build the (old_name, new_name) pairs mapping LAVIS checkpoint keys to
    the HF InstructBLIP state-dict layout.

    Args:
        config: an InstructBlipConfig-like object; only
            `config.vision_config.num_hidden_layers` is read.

    Returns:
        list of (source_key, target_key) tuples.
    """
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys
def lowercase(dct, old, new) -> None:
    """Rename a key in *dct* in place: pop the value stored under *old* and
    store it under *new*.

    The original signature declared all three parameters as ``_a`` (a
    SyntaxError in Python) and never wrote the popped value back; distinct
    parameter names and the write-back are restored.
    """
    val = dct.pop(old)
    dct[new] = val
def lowercase(state_dict, config) -> None:
    """Fold the separate q/v attention biases of each ViT block into the single
    ``qkv`` bias tensor the HF vision encoder expects (k gets a zero bias).

    Mutates *state_dict* in place. The original obfuscated version declared
    duplicate parameters (SyntaxError) and never stored the concatenated bias;
    both are fixed here.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict: layout is [q | zeros (for k) | v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def lowercase(model_name) -> Tuple:
    """Build the composite InstructBLIP config for *model_name*.

    Returns ``(config, image_size)`` where *image_size* is 364 for COCO-trained
    checkpoints and 224 otherwise. The original obfuscated body assigned every
    intermediate config to the same variable, clobbering the vision config
    before it was used; distinct locals are restored here.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def lowercase ( _a ,_a=None ,_a=False ) -> Tuple:
    """Convert an original LAVIS InstructBLIP checkpoint to the HF format.

    Steps: build tokenizers, build the HF config/model, load the original
    LAVIS model, rename/transform the state dict, verify logits against the
    original, run generation as a smoke test, then optionally save and push.

    NOTE(review): this body is obfuscation-damaged and preserved byte-for-byte —
    the three parameters are all named ``_a`` (a SyntaxError; originally
    ``model_name``, ``pytorch_dump_folder_path``, ``push_to_hub``) and every
    local is assigned to ``UpperCAmelCase_`` while later lines read the
    original names (``qformer_tokenizer``, ``hf_model``, ...). Restore the
    original names before running.
    """
    UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained("bert-base-uncased" ,truncation_side="left" )
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
    if "t5" in model_name:
        UpperCAmelCase_: Union[str, Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" ,truncation_side="left" )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        UpperCAmelCase_: List[str] = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b" ,truncation_side="left" ,bos_token="</s>" ,unk_token="</s>" )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
    UpperCAmelCase_ , UpperCAmelCase_: Tuple = get_blipa_config(_a )
    UpperCAmelCase_: List[Any] = InstructBlipForConditionalGeneration(_a ).eval()
    # map HF model names to the (LAVIS architecture, LAVIS model type) pair
    UpperCAmelCase_: Optional[int] = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    # two devices so the original and converted model can live side by side
    UpperCAmelCase_: List[str] = "cuda:1" if torch.cuda.is_available() else "cpu"
    UpperCAmelCase_: List[str] = "cuda:2" if torch.cuda.is_available() else "cpu"
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = load_model_and_preprocess(
        name=_a ,model_type=_a ,is_eval=_a ,device=_a )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    UpperCAmelCase_: Optional[int] = original_model.state_dict()
    UpperCAmelCase_: Dict = create_rename_keys(_a )
    for src, dest in rename_keys:
        rename_key(_a ,_a ,_a )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        UpperCAmelCase_: Optional[int] = state_dict.pop(_a )
        if key.startswith("Qformer.bert" ):
            UpperCAmelCase_: List[str] = key.replace("Qformer.bert" ,"qformer" )
        if "attention.self" in key:
            UpperCAmelCase_: Optional[int] = key.replace("self" ,"attention" )
        if "llm_proj" in key:
            UpperCAmelCase_: List[Any] = key.replace("llm_proj" ,"language_projection" )
        if "t5_proj" in key:
            UpperCAmelCase_: int = key.replace("t5_proj" ,"language_projection" )
        if key.startswith("llm_model" ):
            UpperCAmelCase_: Optional[Any] = key.replace("llm_model" ,"language_model" )
        if key.startswith("t5" ):
            UpperCAmelCase_: List[Any] = key.replace("t5" ,"language" )
        UpperCAmelCase_: int = val
    # read in qv biases
    read_in_q_v_bias(_a ,_a )
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(_a ,strict=_a )
    UpperCAmelCase_: Optional[int] = load_demo_image()
    UpperCAmelCase_: int = "What is unusual about this image?"
    # create processor
    UpperCAmelCase_: Dict = BlipImageProcessor(
        size={"height": image_size, "width": image_size} ,image_mean=_a ,image_std=_a )
    UpperCAmelCase_: List[str] = InstructBlipProcessor(
        image_processor=_a ,tokenizer=_a ,qformer_tokenizer=_a ,)
    UpperCAmelCase_: Optional[int] = processor(images=_a ,text=_a ,return_tensors="pt" ).to(_a )
    # make sure processor creates exact same pixel values
    UpperCAmelCase_: int = vis_processors["eval"](_a ).unsqueeze(0 ).to(_a )
    UpperCAmelCase_: Any = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) ,_a )
    original_model.to(_a )
    hf_model.to(_a )
    with torch.no_grad():
        if "vicuna" in model_name:
            UpperCAmelCase_: Union[str, Any] = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
            UpperCAmelCase_: List[Any] = hf_model(**_a ).logits
        else:
            UpperCAmelCase_: Union[str, Any] = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
            UpperCAmelCase_: List[str] = tokenizer("\n" ,return_tensors="pt" ).input_ids.to(_a )
            UpperCAmelCase_: Union[str, Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id ,-100 )
            UpperCAmelCase_: Optional[Any] = hf_model(**_a ,labels=_a ).logits
    print("First values of original logits:" ,original_logits[0, :3, :3] )
    print("First values of HF logits:" ,logits[0, :3, :3] )
    # assert values
    assert original_logits.shape == logits.shape
    # vicuna checkpoints are fp16-derived, so allow a looser tolerance
    UpperCAmelCase_: Dict = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ) ,_a ,atol=_a )
    print("Looks ok!" )
    print("Generating with original model..." )
    UpperCAmelCase_: str = original_model.generate({"image": original_pixel_values, "prompt": prompt} ,num_beams=5 )
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model..." )
    UpperCAmelCase_: List[Any] = hf_model.generate(
        **_a ,do_sample=_a ,num_beams=5 ,max_length=256 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.5 ,length_penalty=1.0 ,temperature=1 ,)
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        UpperCAmelCase_: int = 2
    print("Original generation:" ,_a )
    UpperCAmelCase_: List[str] = processor.batch_decode(_a ,skip_special_tokens=_a )
    UpperCAmelCase_: List[Any] = [text.strip() for text in output_text]
    print("HF generation:" ,_a )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(_a )
        hf_model.save_pretrained(_a )
    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}" )
        hf_model.push_to_hub(f"Salesforce/{model_name}" )
# Command-line entry point for the conversion script.
# NOTE(review): obfuscation assigns the parser, the choices list and the parsed
# args all to `_lowerCAmelCase`, while the following lines read `parser`,
# `choices` and `args`, and the final call targets `convert_blipa_checkpoint`,
# which is not defined under that name in this file — restore the original
# bindings before running.
if __name__ == "__main__":
    _lowerCAmelCase = argparse.ArgumentParser()
    _lowerCAmelCase = [
        """instructblip-vicuna-7b""",
        """instructblip-vicuna-13b""",
        """instructblip-flan-t5-xl""",
        """instructblip-flan-t5-xxl""",
    ]
    parser.add_argument(
        """--model_name""",
        default="""instructblip-flan-t5-xl""",
        choices=choices,
        type=str,
        help="""Path to hf config.json of model to convert""",
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub after converting""",
    )
    _lowerCAmelCase = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import math
from collections.abc import Callable
def _a(function: Callable[[float], float], xa: float, xb: float) -> float:
    """Find a root of *function* with the secant method, starting from the two
    initial guesses *xa* and *xb*.

    Iterates until two successive estimates differ by less than 1e-5.
    Raises ZeroDivisionError when no progress can be made (equal points or
    equal function values make the secant horizontal).

    The original signature declared all three parameters as ``lowercase__``
    (a SyntaxError) while the body read ``function``/``x_n``; distinct names
    are restored.
    """
    x_n = xa
    x_n1 = xb
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        # secant step: x_{n+2} = x_{n+1} - f(x_{n+1}) / slope of the secant
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def _a(x: float) -> float:
    """Demo polynomial f(x) = x**3 - 2x - 5, whose single real root is near
    2.0945515 (the classic secant-method example).

    The original parameter was named ``lowercase__`` while the body read
    ``x``; the name is restored.
    """
    return math.pow(x, 3) - (2 * x) - 5
# NOTE(review): `intersection` and `f` are not defined under these names in
# this module (both functions above were obfuscated to `_a`) — confirm the
# intended entry points before running as a script.
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
| 56 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase_(unittest.TestCase):
    """Holds the hyper-parameters shared by the image-processor tests and
    builds the kwargs dict the processor under test is constructed from.

    The original ``__init__`` declared eleven parameters all named
    ``UpperCAmelCase__`` (a SyntaxError) and stored everything into ``A__``;
    the parameter names implied by the body's reads are restored, and the
    dict builder is renamed to ``prepare_image_processor_dict`` to match the
    call site in the companion test class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # NOTE: mutable defaults are safe here only as long as consumers never
        # mutate them — they are treated as read-only test fixtures.
        self.size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
    """Tests the image processor on PIL, numpy and torch inputs.

    NOTE(review): every method below is obfuscated to the same name
    ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones), the setUp
    references ``EfficientFormerImageProcessorTester`` which is not defined
    in this file, and each method body assigns to ``A__`` while later lines
    read the original local names — code preserved byte-for-byte.
    """
    UpperCAmelCase__ = ViTImageProcessor if is_vision_available() else None

    # originally setUp: builds the hyper-parameter tester
    def SCREAMING_SNAKE_CASE ( self : List[str]) ->str:
        A__ = EfficientFormerImageProcessorTester(self)

    # originally image_processor_dict property
    @property
    def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
        return self.image_proc_tester.prepare_image_processor_dict()

    # originally test_image_proc_properties: all config attrs must exist
    def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
        A__ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize'''))
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size'''))

    # originally test_batch_feature: intentionally empty
    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
        pass

    # originally test_call_pil: PIL images, unbatched and batched
    def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
        A__ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image)
        # Test not batched input
        A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )

    # originally test_call_numpy: numpy arrays, unbatched and batched
    def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
        A__ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray)
        # Test not batched input
        A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )

    # originally test_call_pytorch: torch tensors, unbatched and batched
    def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
        A__ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)
        # Test not batched input
        A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
| 87 | 0 |
def UpperCAmelCase_(a: str, b: str) -> bool:
    """Return True if string *a* can be abbreviated to string *b*.

    An abbreviation is obtained by upper-casing some subset of lowercase
    letters of *a* and deleting all remaining lowercase letters.
    Classic DP: ``dp[i][j]`` is True when the first *i* chars of *a* can
    produce the first *j* chars of *b*.

    The original declared both parameters as ``snake_case__`` (SyntaxError)
    and collapsed all dp writes into one local; the table updates are restored.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # upper-case a[i] to match b[j]
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # or drop a[i] entirely (only lowercase letters may be dropped)
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
# Run any doctests in this module when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 706 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __snake_case ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast tests for VideoToVideoSDPipeline.

    NOTE(review): obfuscation damage preserved byte-for-byte — the class
    attributes are all named ``SCREAMING_SNAKE_CASE__`` (later ones shadow
    earlier ones; originally pipeline_class / params / batch_params / ...),
    the first base class ``SCREAMING_SNAKE_CASE`` is undefined here
    (presumably PipelineTesterMixin), every method shares one name, and
    method bodies assign to ``lowerCAmelCase__`` while reading the original
    local names.
    """
    SCREAMING_SNAKE_CASE__ = VideoToVideoSDPipeline
    SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
    SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
    SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {'latents'}
    SCREAMING_SNAKE_CASE__ = False
    # No `output_type`.
    SCREAMING_SNAKE_CASE__ = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ] )

    # originally get_dummy_components: builds tiny unet/scheduler/vae/text stack
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Build the minimal component dict for a throwaway pipeline."""
        torch.manual_seed(0 )
        lowerCAmelCase__ = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') ,up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') ,cross_attention_dim=32 ,attention_head_dim=4 ,)
        lowerCAmelCase__ = DDIMScheduler(
            beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
        torch.manual_seed(0 )
        lowerCAmelCase__ = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
        torch.manual_seed(0 )
        lowerCAmelCase__ = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
        lowerCAmelCase__ = CLIPTextModel(a_ )
        lowerCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        lowerCAmelCase__ = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    # originally get_dummy_inputs(device, seed=0)
    def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=0 ):
        """Build deterministic call kwargs (tiny 3-frame video) for the pipeline."""
        # 3 frames
        lowerCAmelCase__ = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(a_ ) ).to(a_ )
        if str(a_ ).startswith('mps' ):
            lowerCAmelCase__ = torch.manual_seed(a_ )
        else:
            lowerCAmelCase__ = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCAmelCase__ = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs

    # originally test_text_to_video_default_case: checks an exact output slice
    def SCREAMING_SNAKE_CASE_ ( self ):
        """End-to-end smoke test against a pinned output slice."""
        lowerCAmelCase__ = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ = self.get_dummy_components()
        lowerCAmelCase__ = VideoToVideoSDPipeline(**a_ )
        lowerCAmelCase__ = sd_pipe.to(a_ )
        sd_pipe.set_progress_bar_config(disable=a_ )
        lowerCAmelCase__ = self.get_dummy_inputs(a_ )
        lowerCAmelCase__ = 'np'
        lowerCAmelCase__ = sd_pipe(**a_ ).frames
        lowerCAmelCase__ = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        lowerCAmelCase__ = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
    def SCREAMING_SNAKE_CASE_ ( self ):
        """xformers attention must match the default attention closely."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_ ,expected_max_diff=5e-3 )

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Skipped: batched inference unsupported."""
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Skipped: batched inference unsupported."""
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Skipped: num_images_per_prompt unsupported."""
        pass

    def SCREAMING_SNAKE_CASE_ ( self ):
        """Delegate to the mixin's progress-bar test."""
        return super().test_progress_bar()
@slow
@skip_mps
class __snake_case ( unittest.TestCase ):
    """Slow integration test against the real zeroscope checkpoint.

    NOTE(review): requires CUDA and a hub download; locals are obfuscated to
    ``lowerCAmelCase__`` while later lines read the original names (``pipe``,
    ``video``, ``video_frames``) — preserved byte-for-byte.
    """
    def SCREAMING_SNAKE_CASE_ ( self ):
        """Run 3 inference steps on random frames and pin the first values."""
        lowerCAmelCase__ = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' ,torch_dtype=torch.floataa )
        pipe.enable_model_cpu_offload()
        # 10 frames
        lowerCAmelCase__ = torch.Generator(device='cpu' ).manual_seed(0 )
        lowerCAmelCase__ = torch.randn((1, 10, 3, 1024, 576) ,generator=a_ )
        lowerCAmelCase__ = video.to('cuda' )
        lowerCAmelCase__ = 'Spiderman is surfing'
        lowerCAmelCase__ = pipe(a_ ,video=a_ ,generator=a_ ,num_inference_steps=3 ,output_type='pt' ).frames
        lowerCAmelCase__ = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 604 | 0 |
from math import factorial
def UpperCamelCase_(snake_case__: int = 100) -> int:
    """Return the sum of the decimal digits of ``snake_case__!`` (Project
    Euler problem 20; the default of 100 yields the puzzle's answer, 648).

    The original body mapped the undefined name ``__UpperCAmelCase`` over the
    digit characters; ``int`` is the intended digit converter.
    """
    return sum(map(int, str(factorial(snake_case__))))
# NOTE(review): `solution` is not defined in this module (the function above
# was obfuscated to `UpperCamelCase_`) — confirm the intended entry point.
if __name__ == "__main__":
    print(solution(int(input('''Enter the Number: ''').strip())))
| 146 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all four module-level constants below were obfuscated to the
# single name `a_`, so each assignment clobbers the previous one; the
# tokenizer class later reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which no longer exist — restore the
# original names before use.
# Module-level logger.
a_ : Dict = logging.get_logger(__name__)
# File names the tokenizer serializes to / loads from.
a_ : Any = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
# Hub download locations for each pretrained checkpoint's files.
a_ : Union[str, Any] = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}
# Maximum model input lengths (positional embeddings), per checkpoint.
a_ : List[Any] = {'facebook/blenderbot_small-90M': 5_12}
def __a(__UpperCAmelCase):
    """Return the set of adjacent symbol pairs in a word.

    *__UpperCAmelCase* is a non-empty sequence of symbols (a string or a
    tuple of string tokens); the result contains every ``(prev, next)``
    bigram, which BPE uses to pick the next merge.

    The original obfuscated body assigned every local to ``a__`` while
    reading ``word``/``pairs``/``prev_char``; distinct locals are restored
    and the redundant ``set(pairs)`` copy is dropped.
    """
    pairs = set()
    prev_char = __UpperCAmelCase[0]
    for char in __UpperCAmelCase[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __UpperCamelCase ( _lowercase ):
    """BPE tokenizer for BlenderbotSmall (facebook/blenderbot_small-90M).

    NOTE(review): obfuscation damage preserved byte-for-byte — ``__init__``
    declares duplicate ``SCREAMING_SNAKE_CASE`` parameters (a SyntaxError;
    originally vocab_file, merges_file, bos/eos/unk/pad tokens), method
    bodies assign to ``a__`` while later lines read the original local names
    (``merges``, ``word``, ``pairs``, ...), and the class attributes
    reference the module constants under their original un-obfuscated names.
    """
    _lowercase : Union[str, Any] = VOCAB_FILES_NAMES
    _lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase : int = ['''input_ids''', '''attention_mask''']

    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="__start__" , SCREAMING_SNAKE_CASE="__end__" , SCREAMING_SNAKE_CASE="__unk__" , SCREAMING_SNAKE_CASE="__null__" , **SCREAMING_SNAKE_CASE , ) -> List[str]:
        super().__init__(unk_token=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        # load the token -> id vocab and its inverse
        with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle:
            a__ = json.load(SCREAMING_SNAKE_CASE )
        a__ = {v: k for k, v in self.encoder.items()}
        # load BPE merges (first line is a header, last line is empty)
        with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle:
            a__ = merges_handle.read().split('''\n''' )[1:-1]
        a__ = [tuple(merge.split() ) for merge in merges]
        a__ = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
        # per-token BPE result cache
        a__ = {}

    @property
    def _UpperCAmelCase ( self ) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder )

    def _UpperCAmelCase ( self ) -> Dict:
        """Full vocab including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
        """Apply BPE to a single whitespace token and return merged subwords."""
        if token in self.cache:
            return self.cache[token]
        # pre-tokenize punctuation and apostrophes, normalize whitespace
        a__ = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE )
        a__ = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE )
        a__ = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE )
        if "\n" in token:
            a__ = token.replace('''\n''' , ''' __newln__''' )
        a__ = token.split(''' ''' )
        a__ = []
        for token in tokens:
            if not len(SCREAMING_SNAKE_CASE ):
                continue
            a__ = token.lower()
            a__ = tuple(SCREAMING_SNAKE_CASE )
            # mark the last symbol with the end-of-word suffix
            a__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            a__ = get_pairs(SCREAMING_SNAKE_CASE )
            if not pairs:
                words.append(SCREAMING_SNAKE_CASE )
                continue
            # repeatedly merge the lowest-ranked bigram
            while True:
                a__ = min(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : self.bpe_ranks.get(SCREAMING_SNAKE_CASE , float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                a__ , a__ = bigram
                a__ = []
                a__ = 0
                while i < len(SCREAMING_SNAKE_CASE ):
                    try:
                        a__ = word.index(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                        new_word.extend(word[i:j] )
                        a__ = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                a__ = tuple(SCREAMING_SNAKE_CASE )
                a__ = new_word
                if len(SCREAMING_SNAKE_CASE ) == 1:
                    break
                else:
                    a__ = get_pairs(SCREAMING_SNAKE_CASE )
            # join subwords with the continuation marker, drop trailing </w>
            a__ = '''@@ '''.join(SCREAMING_SNAKE_CASE )
            a__ = word[:-4]
            a__ = word
            words.append(SCREAMING_SNAKE_CASE )
        return " ".join(SCREAMING_SNAKE_CASE )

    def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
        """Split raw text into BPE subword tokens."""
        a__ = []
        a__ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE )
        for token in words:
            split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE ).split(''' ''' ) ) )
        return split_tokens

    def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
        """Token string -> id (unknown tokens map to the unk id)."""
        a__ = token.lower()
        return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )

    def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
        """Id -> token string (unknown ids map to the unk token)."""
        return self.decoder.get(SCREAMING_SNAKE_CASE , self.unk_token )

    def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
        """Join subword tokens back into text, removing continuation markers."""
        a__ = ''' '''.join(SCREAMING_SNAKE_CASE ).replace('''@@ ''' , '''''' ).strip()
        return out_string

    def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into a directory; returns the paths."""
        if not os.path.isdir(SCREAMING_SNAKE_CASE ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        a__ = os.path.join(
            SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        a__ = os.path.join(
            SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE , ensure_ascii=SCREAMING_SNAKE_CASE ) + '''\n''' )
        a__ = 0
        with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ''' Please check that the tokenizer is not corrupted!''' )
                    a__ = token_index
                writer.write(''' '''.join(SCREAMING_SNAKE_CASE ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
| 194 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab (interactive key handling
# is unavailable there, so the menu falls back to typed indices).
# NOTE(review): both assignments were obfuscated to `_SCREAMING_SNAKE_CASE`
# while the menu below reads `in_colab` — restore the original name.
_SCREAMING_SNAKE_CASE : Tuple = False
try:
    _SCREAMING_SNAKE_CASE : Union[str, Any] = _is_package_available('''google.colab''')
except ModuleNotFoundError:
    pass
@input.register
class UpperCAmelCase__ :
    """Interactive terminal bullet menu: arrow/number keys move a cursor over
    a list of choices, enter selects.

    NOTE(review): obfuscation damage preserved byte-for-byte — ``__init__``
    declares two parameters both named ``__lowerCamelCase`` (a SyntaxError;
    originally ``prompt`` and ``choices``), every method shares the name
    ``lowercase_`` (originally write_choice / print_choice / move_direction /
    go_up / go_down / select / interrupt / select_row / run) so later defs
    shadow earlier ones, and bodies assign to ``SCREAMING_SNAKE_CASE__``
    while reading the original local names.
    """
    def __init__( self : List[str] , __lowerCamelCase : str = None , __lowerCamelCase : list = [] ) -> int:
        # cursor starts at the first choice
        SCREAMING_SNAKE_CASE__ = 0
        SCREAMING_SNAKE_CASE__ = choices
        SCREAMING_SNAKE_CASE__ = prompt
        if sys.platform == "win32":
            SCREAMING_SNAKE_CASE__ = '''*'''
        else:
            SCREAMING_SNAKE_CASE__ = '''➔ '''

    # originally write_choice(index, end=""): green on POSIX, plain on Windows
    def lowercase_ ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str = "" ) -> int:
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , __lowerCamelCase )
        else:
            forceWrite(self.choices[index] , __lowerCamelCase )

    # originally print_choice(index): arrow marker on the highlighted row
    def lowercase_ ( self : Optional[int] , __lowerCamelCase : int ) -> Optional[Any]:
        if index == self.position:
            forceWrite(f''' {self.arrow_char} ''' )
            self.write_choice(__lowerCamelCase )
        else:
            forceWrite(f''' {self.choices[index]}''' )
        reset_cursor()

    # originally move_direction(direction, num_spaces=1): move and redraw
    def lowercase_ ( self : Tuple , __lowerCamelCase : Direction , __lowerCamelCase : int = 1 ) -> List[Any]:
        SCREAMING_SNAKE_CASE__ = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(__lowerCamelCase )
        move_cursor(__lowerCamelCase , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP['''up'''] )
    def lowercase_ ( self : Any ) -> List[str]:
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP['''down'''] )
    def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP['''newline'''] )
    def lowercase_ ( self : Tuple ) -> Union[str, Any]:
        # enter: jump below the menu and return the selected index
        move_cursor(len(self.choices ) - self.position , '''DOWN''' )
        return self.position

    @input.mark(KEYMAP['''interrupt'''] )
    def lowercase_ ( self : Tuple ) -> str:
        # ctrl-c: restore cursor position, then propagate
        move_cursor(len(self.choices ) - self.position , '''DOWN''' )
        raise KeyboardInterrupt

    # NOTE(review): the comprehension below looks up KEYMAP[str(__lowerCamelCase)]
    # instead of KEYMAP[str(number)] — obfuscation artifact; confirm original.
    @input.mark_multiple(*[KEYMAP[str(__lowerCamelCase )] for number in range(10 )] )
    def lowercase_ ( self : Dict ) -> Union[str, Any]:
        # digit key: jump directly to that choice index
        SCREAMING_SNAKE_CASE__ = int(chr(self.current_selection ) )
        SCREAMING_SNAKE_CASE__ = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , __lowerCamelCase )
            else:
                return
        else:
            return

    # originally run(default_choice=0): draw menu, loop on input, return choice
    def lowercase_ ( self : Any , __lowerCamelCase : int = 0 ) -> List[str]:
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '''\n''' )
            if in_colab:
                forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
            else:
                forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
        SCREAMING_SNAKE_CASE__ = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(__lowerCamelCase )
            forceWrite('''\n''' )
        move_cursor(len(self.choices ) - self.position , '''UP''' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        SCREAMING_SNAKE_CASE__ = int(builtins.input() )
                    except ValueError:
                        SCREAMING_SNAKE_CASE__ = default_choice
                else:
                    SCREAMING_SNAKE_CASE__ = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , '''UP''' )
                        clear_line()
                    self.write_choice(__lowerCamelCase , '''\n''' )
                    return choice
| 472 |
import math
class UpperCAmelCase__ :
"""simple docstring"""
def lowercase_ ( self : int , __lowerCamelCase : list[list[float]] , __lowerCamelCase : list[int] ) -> int:
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = 0.0
for i in range(len(__lowerCamelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def lowercase_ ( self : Optional[int] , __lowerCamelCase : list[list[int | float]] , __lowerCamelCase : list[int] , __lowerCamelCase : int , __lowerCamelCase : float ) -> list[list[int | float]]:
for i in range(len(__lowerCamelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def UpperCAmelCase_():
    """Train the two-unit SOM on four binary samples, then classify a test
    sample and print the winning cluster and trained weights.

    Fixes to the obfuscated original: it instantiated the undefined name
    ``SelfOrganizingMap`` (the class in this file is ``UpperCAmelCase__``)
    and collapsed every local into one placeholder variable.
    """
    # training data: 4 samples with 4 features each
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = UpperCAmelCase__()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')
# running the main() function
# (the demo entry point above was obfuscated to `UpperCAmelCase_`; the
# original call to the undefined name `main` is fixed to match)
if __name__ == "__main__":
    UpperCAmelCase_()
| 472 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A__(unittest.TestCase):
    """Fast smoke test for PNDMPipeline with a tiny UNet.

    NOTE(review): local variable wiring and the property name reconstructed
    from the visible `self.dummy_uncond_unet` access; the obfuscated original
    assigned everything to `A_` and read undefined `UpperCamelCase__`.
    The test method name is reconstructed — confirm against upstream.
    """

    @property
    def dummy_uncond_unet(self):
        """Deterministically-seeded tiny unconditional UNet."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return unet

    def test_inference(self):
        """Pipeline output matches the expected slice, with and without return_dict."""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(
            generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test for PNDMPipeline on the pretrained CIFAR-10 DDPM.

    NOTE(review): renamed from the obfuscated `A__`, which shadowed the fast
    test class of the same name earlier in this file; the integration-test
    name is reconstructed — confirm against upstream.
    """

    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 288 |
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix, vector, error_tol=1e-12, max_iterations=100,
) -> tuple[float, np.ndarray]:
    """Estimate the dominant eigenvalue and eigenvector of ``input_matrix``.

    Uses power iteration starting from ``vector`` and stops when the relative
    change of the Rayleigh quotient drops below ``error_tol`` or after
    ``max_iterations`` steps. ``input_matrix`` must be square (and Hermitian
    when complex); ``vector`` must have a matching dimension.

    Returns ``(eigenvalue, eigenvector)``; the eigenvalue is real for
    Hermitian input.
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    """Compare power iteration against numpy's eigh on real and Hermitian inputs.

    NOTE(review): function name restored from the `if __name__` call below;
    `np.complexaaa` in the obfuscated original restored to `np.complex128`.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
| 288 | 1 |
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively solve Tower of Hanoi: move ``height`` disks from
    ``from_pole`` to ``to_pole`` using ``with_pole`` as scratch space.

    NOTE(review): names restored from the recursive calls visible in the
    obfuscated original, whose parameters were duplicated and whose body
    referenced the undefined `lowerCAmelCase__`.
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    """Print a single disk move."""
    print('moving disk from', from_pole, 'to', to_pole)
def main() -> None:
    """Prompt for a tower height and print the full move sequence."""
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
| 714 |
import copy
import random
from transformers import CLIPTokenizer
class __lowercase(CLIPTokenizer):
    """CLIPTokenizer that expands one placeholder token into several tokens.

    NOTE(review): base class, method names and local wiring restored from the
    visible import and internal calls (`self.try_adding_tokens`,
    `self.replace_placeholder_tokens_in_text`, `self.token_map`); the
    obfuscated original defined four methods under one name and referenced
    undefined locals.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder token -> list of the concrete tokens it expands to
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add ``placeholder_token`` to the vocabulary; raise if it already exists."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f'The tokenizer already contains the token {placeholder_token}. Please pass a different'
                ' `placeholder_token` that is not already in the tokenizer.')

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register ``placeholder_token`` backed by ``num_vec_per_token`` vocabulary tokens."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f'_{i}'
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'The tokenizer already has placeholder token {token} that can get confused with'
                    f' {placeholder_token}keep placeholder tokens independent')
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder token occurring in ``text`` (str or list of str)."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # keep only the leading fraction of the expansion tokens
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ' '.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """Tokenize ``text`` after expanding placeholder tokens."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args, **kwargs,)

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """Encode ``text`` after expanding placeholder tokens."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args, **kwargs,)
| 585 | 0 |
def pancake_sort(arr: list) -> list:
    """Sort ``arr`` ascending with pancake sort; returns a new list (input untouched).

    Each round flips the prefix so the current maximum comes first, then flips
    the whole unsorted prefix to drop that maximum into its final place.

    NOTE(review): function and local names restored from the body/caller of
    the obfuscated original, which referenced undefined `arr`/`cur`/`mi` and
    un-imported typing annotations.
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    # NOTE(review): local names restored to match their uses on the next lines;
    # the obfuscated original assigned both values to `A`.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
| 176 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : Dict = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A(PretrainedConfig):
    """Configuration for GLPN depth-estimation models.

    NOTE(review): base class restored from the visible import; parameter names
    restored from the attribute assignments (the obfuscated original used an
    undefined base and duplicate parameter names).
    """

    # NOTE(review): presumed to be `model_type` per PretrainedConfig convention — confirm
    model_type = 'glpn'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        # mutable list defaults kept deliberately to mirror the upstream signature
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 176 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 701 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _lowerCAmelCase(unittest.TestCase):
    """Smoke tests for PyTorchBenchmark on tiny models.

    NOTE(review): the obfuscated original defined every method under one name
    and referenced undefined `MODEL_ID`/`benchmark`/`results`. Method names
    and the training/inference flags are reconstructed from the result fields
    each test checks — confirm against upstream transformers tests.
    """

    def check_results_dict_not_empty(self, results):
        """Assert every (batch_size, sequence_length) entry is populated."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 117 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a_ : Any = logging.get_logger(__name__)
class UpperCamelCase(SegformerImageProcessor):
    """Deprecated alias for SegformerImageProcessor, kept for backward compatibility.

    NOTE(review): base class restored from the visible import; the obfuscated
    original used an undefined base and duplicate `*`/`**` parameter names, and
    passed the varargs tuple as the warning category.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.', FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 439 |
import argparse
import os
import re
a_ : List[str] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ : Optional[Any] = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
a_ : Optional[int] = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname: str, overwrite: bool = False) -> bool | None:
    """Sort the entries of every `*_MAPPING` OrderedDict in ``fname`` by identifier.

    With ``overwrite=True`` the file is rewritten in place; otherwise returns
    True when the file would change (and None when already sorted).

    NOTE(review): function/parameter names restored from the caller below and
    the surviving `overwrite` reference; the obfuscated original had duplicate
    parameter names.
    """
    with open(fname, 'r', encoding='utf-8') as f:
        content = f.read()
    lines = content.split('\n')
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # entries of the mapping are indented 8 columns past its intro line
            indent = len(re.search(r'^(\s*)\S', lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '('):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')'):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, 'w', encoding='utf-8') as f:
            f.write('\n'.join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False) -> None:
    """Run :func:`sort_auto_mapping` over every ``.py`` file in the auto-models folder.

    Raises ValueError listing the offending files when ``overwrite`` is False
    and some mappings are out of order.

    NOTE(review): names restored from the call sites; the obfuscated original
    passed its bool parameter to `os.listdir`/`os.path.join`.
    """
    fnames = [os.path.join(AUTO_MODULE_PATH, f) for f in os.listdir(AUTO_MODULE_PATH) if f.endswith('.py')]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"""
            ' this.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
| 439 | 1 |
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f'{solution() = }')
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase(SegformerImageProcessor):
    """Deprecated alias for SegformerImageProcessor, kept for backward compatibility.

    NOTE(review): base class restored from the visible import (the obfuscated
    original inherited from the module logger variable); duplicate `*`/`**`
    parameter names and the warning category fixed.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.', FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 652 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : int = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for GPT-NeoX models.

    NOTE(review): base class restored from the visible import; parameter names
    restored from the attribute assignments (the obfuscated original used an
    undefined base and duplicate parameter names).
    """

    model_type = 'gpt_neox'

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''')

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: must hold a known `type` and a float `factor` > 1."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 98 |
"""Lazy import structure for the ALBERT model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# NOTE(review): restored the standard `_import_structure` dict wiring used by
# `_LazyModule` (the obfuscated original rebound one variable repeatedly);
# submodule keys are taken from the TYPE_CHECKING imports below.
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazily-populated proxy
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# NOTE(review): both module constants below share one name, so the second
# assignment clobbers the compiled regex. The functions below read
# ARTICLES_REGEX (the article-stripping pattern) and OPTS (the parsed CLI
# options, assigned in the __main__ guard) — these were presumably the
# original names; confirm against the official SQuAD v2.0 eval script.
UpperCamelCase__ : Any = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
UpperCamelCase__ : Optional[Any] = None
def A_():
    """Build and parse the CLI arguments for the SQuAD 2.0 evaluation script.

    Prints help and exits when invoked with no arguments.
    Fixes: the parser was never bound to `parser`, and `type`/`default` for
    --na-prob-thresh / --out-image-dir referenced an undefined name.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def A_(dataset):
    """Map each question id in *dataset* to whether it has a gold answer.

    Fixes: the mapping was never written into (a bare local was rebound each
    iteration) and the returned name was unbound.
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                # A question is answerable iff it has at least one answer text.
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def A_(s):
    """SQuAD answer normalization: lowercase, drop punctuation and articles,
    collapse whitespace.

    Fixes: the punctuation set local was unbound (`exclude`), and the
    article-stripping regex is compiled inline because the module-level
    constant's name was clobbered (`re` caches compiled patterns).
    """

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text, flags=re.UNICODE)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def A_(s):
    """Whitespace-tokenize *s* after SQuAD normalization; empty/None -> [].

    Fixes: the body tested an unbound name `s` while the parameter had been
    renamed; the parameter is now used consistently.
    """
    if not s:
        return []
    # normalize_answer is the sibling normalization helper in this module.
    return normalize_answer(s).split()
def A_(a_gold, a_pred):
    """Exact-match score (0/1) between a gold answer and a prediction.

    Fixes: the two parameters shared one name (a SyntaxError); they are now
    distinct, matching the gold/pred roles used by the callers.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def A_(a_gold, a_pred):
    """Token-level F1 between a gold answer and a prediction.

    Fixes: duplicate parameter names (SyntaxError) and every local
    (gold_toks/pred_toks/common/num_same/precision/recall) was unbound.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def A_(dataset, preds):
    """Compute per-question exact-match and F1 scores.

    Returns two dicts keyed by question id. Fixes: duplicate parameter names
    (SyntaxError) and unbound locals (exact/F1 dicts, qid, gold_answers).
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def A_(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out scores for questions the model predicts as unanswerable.

    A question counts as predicted-no-answer when its no-answer probability
    exceeds *na_prob_thresh*; such questions score 1.0 only if they truly have
    no answer. Fixes: duplicate parameter names and an unbound result dict.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def A_(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into exact/f1/total percentages.

    When *qid_list* is given, only those question ids are averaged.
    Fixes: duplicate parameter names (SyntaxError) and the unbound `total`.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def A_(main_eval, new_eval, prefix):
    """Copy every metric from *new_eval* into *main_eval* under `prefix_key`.

    Fixes: duplicate parameter names and a loop body that discarded each value
    into an unbound local instead of writing into *main_eval*.
    """
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def A_(precisions, recalls, out_image, title):
    """Save a step-style precision-recall curve to *out_image*.

    Requires matplotlib's pyplot to be importable as the global `plt`
    (imported in the __main__ guard). Fixes: duplicate parameter names.
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def A_(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Average precision of *scores* when sweeping the no-answer threshold.

    Questions are ranked by ascending no-answer probability; precision/recall
    are accumulated at every distinct threshold. Optionally saves a PR curve.
    Fixes: duplicate parameter names and unbound running-state locals.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def A_(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute PR-curve metrics (exact/F1/oracle) and merge into *main_eval*.

    Also writes the three PR-curve PNGs into *out_image_dir*.
    Fixes: duplicate parameter names and unbound locals for intermediate
    evaluation dicts.
    """
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def A_(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for *qid_list*.

    No-op on an empty list. Requires the global `plt` (imported in the
    __main__ guard). Fixes: duplicate parameter names and unbound locals.
    """
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"""Histogram of no-answer probability: {name}""")
    plt.savefig(os.path.join(image_dir, f"""na_prob_hist_{name}.png"""))
    plt.clf()
def A_(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer probability threshold maximizing the total score.

    Returns (best score as a percentage of len(scores), best threshold).
    Fixes: duplicate parameter names and unbound accumulator locals.
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Answering an unanswerable question costs a point only when the
            # model actually produced a non-empty prediction.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def A_(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record best exact/F1 scores and their thresholds into *main_eval*.

    Fixes: duplicate parameter names (SyntaxError) and results that were
    discarded into unbound locals instead of written into *main_eval*.
    """
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def A_():
    """Run the SQuAD 2.0 evaluation end to end.

    Reads the dataset, predictions and (optionally) no-answer probabilities
    from the paths in the global OPTS, computes thresholded exact/F1 metrics,
    and writes them to OPTS.out_file or stdout.
    Fixes: every intermediate result was bound to a throwaway local while
    later lines read the original names.
    """
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    # NOTE(review): the parsed options are stored under `UpperCamelCase__`
    # while the functions above read `OPTS`, and `parse_args`/`main` are not
    # defined under those names in this file — presumably a mechanical rename;
    # confirm against the official SQuAD v2.0 eval script.
    UpperCamelCase__ : int = parse_args()
    if OPTS.out_image_dir:
        # matplotlib is only needed for the optional PR-curve images; force a
        # non-interactive backend so the script works headless.
        import matplotlib
        matplotlib.use("""Agg""")
        import matplotlib.pyplot as plt
    main()
| 715 |
from torch import nn
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __lowercase : List[str] , __lowercase : Dict ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ = class_size
UpperCAmelCase_ = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
UpperCAmelCase_ = nn.Linear(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.mlp(__lowercase )
return logits
| 486 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
snake_case : List[str] = TypeVar("T")
class _snake_case ( Generic[T] ):
    """One node of a disjoint-set forest: payload, parent link, rank."""

    def __init__(self, data):
        """Fixes: the body read `data` while the parameter had been renamed,
        raising NameError; the parameter now carries the intended name."""
        self.data = data
        self.parent = self  # a fresh node is its own set representative
        self.rank = 0  # rank used by union-by-rank
class _snake_case ( Generic[T] ):
    """Disjoint-set forest with union-by-rank and path compression.

    Fixes: all four methods shared one name (each definition shadowed the
    previous), `link`/`union` had duplicate parameter names (SyntaxError),
    and results were discarded into unbound locals. The restored method names
    (make_set/find_set/link/union) match this file's own call sites, e.g.
    `disjoint_set.make_set(...)` and `self.find_set(...)`.
    """

    def __init__(self):
        # map from node data to the node object
        self.map = {}

    def make_set(self, data):
        """Create a new singleton set containing *data*."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data):
        """Return the representative node of *data*'s set (path compression)."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1, node2):
        """Attach the lower-rank root beneath the higher-rank root."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1, data2):
        """Merge the two disjoint sets containing *data1* and *data2*."""
        self.link(self.find_set(data1), self.find_set(data2))
class _snake_case ( Generic[T] ):
    """Undirected weighted graph with a Kruskal minimum-spanning-tree builder.

    Fixes: the three methods shared one name (shadowing), the edge-sort lambda
    referenced an unbound `x`, and the MST loop's unpacked edge/parents were
    discarded into unbound locals. Method names (add_node/add_edge) match the
    internal call sites already present in the original body.
    """

    def __init__(self):
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}

    def add_node(self, node):
        """Register *node* if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1, node2, weight):
        """Add an undirected edge with the given weight."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self):
        """Return a new graph holding a minimum spanning tree of this one."""
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        # NOTE(review): DisjointSetTree / GraphUndirectedWeighted are the
        # canonical names of the classes defined above (renamed in this file);
        # references kept as in the original body.
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 124 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Image preprocessing pipeline: resize to 256x256, convert to a tensor, and
# normalize channel values from [0, 1] to [-1, 1].
# NOTE(review): the preprocessing function below applies this object under the
# name `trans` — presumably this constant's original name; confirm against the
# upstream diffusers pipeline module.
snake_case : Optional[int] = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def lowerCAmelCase_(image):
    """Convert a PIL image (or list of them) to a stacked, normalized tensor.

    Tensors are returned unchanged. Fixes: the function referenced an
    undefined `trans` (the module-level transform is bound to `snake_case`)
    and stacked the wrong object.
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    # `snake_case` is the module-level torchvision Compose defined above.
    image = [snake_case(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class _snake_case ( DiffusionPipeline ):
    """DDIM image-to-image pipeline: noises an input image up to an
    intermediate timestep (controlled by `strength`) and denoises it back
    with a UNet.

    Fixes: the base-class expression resolved to the module-level transform
    object (not a class) — DiffusionPipeline, imported above, matches this
    class's use of register_modules/progress_bar/numpy_to_pil; the four
    helper methods shared one name while `__call__` invoked the originals
    (check_inputs/get_timesteps/prepare_latents); `__call__` had duplicate
    parameter names (SyntaxError); intermediate results were unbound.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        """Validate the denoising strength."""
        if strength < 0 or strength > 1:
            raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""")

    def get_timesteps(self, num_inference_steps, strength, device):
        """Trim the scheduler's timesteps according to `strength`."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Move the image to device/dtype and add scheduler noise at `timestep`."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"""
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return init_latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
| 124 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all four module constants below share one name, so each
# assignment clobbers the previous. The tokenizer class body reads `logger`,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — presumably the original names;
# confirm against the upstream transformers MGP-STR module.
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {'''vocab_file''': '''vocab.json'''}
SCREAMING_SNAKE_CASE : Any = {
    '''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
    }
}
SCREAMING_SNAKE_CASE : List[str] = {'''mgp-str''': 27}
class _UpperCAmelCase ( PreTrainedTokenizer ):
    """Character-level (MGP-STR style) tokenizer: each character of the input
    is one token, looked up in a JSON vocab file.

    Fixes: the class inherited from itself (a NameError at class creation) —
    PreTrainedTokenizer, imported above, is the base its methods implement;
    all six methods shared one name (shadowing); method bodies read
    `__UpperCamelCase` while their parameters had been renamed. The restored
    method names follow the PreTrainedTokenizer API the bodies implement.
    """

    # Names expected by the PreTrainedTokenizer machinery.
    # NOTE(review): the module constants above were collapsed onto one name;
    # the canonical constant names referenced here should match the original
    # module — confirm upstream.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # reverse mapping: id -> token
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of tokens in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Base vocab merged with any added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split *text* into single-character tokens."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Vocabulary lookup with fallback to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Reverse vocabulary lookup (None for unknown ids)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocab JSON into *save_directory*; returns the file path."""
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        return (vocab_file,)
| 719 |
"""simple docstring"""
from __future__ import annotations
def lowercase(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve the shear-stress relation (stress = force / area) for whichever
    of the three quantities is given as 0.

    Exactly one argument must be 0; returns (name_of_solved_quantity, value).
    Raises ValueError on a wrong number of zeros or any negative input.
    Fixes: all three parameters shared one name (a SyntaxError) while the
    body already used the intended names.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif stress < 0:
        raise ValueError('''Stress cannot be negative''')
    elif tangential_force < 0:
        raise ValueError('''Tangential Force cannot be negative''')
    elif area < 0:
        raise ValueError('''Area cannot be negative''')
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 229 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCamelCase_(key, default=False):
    """Read a boolean flag named *key* from the environment.

    Returns *default* when the variable is unset; otherwise coerces
    "yes"/"no"-style values through strtobool (1/0), raising ValueError for
    anything else. Fixes: the two parameters shared one name (SyntaxError)
    and strtobool was applied to the wrong value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value
# Test-suite feature flags and pytest skip-marks.
# NOTE(review): every assignment below targets the single name
# `__UpperCamelCase`, so each clobbers the previous; the decorators further
# down read `_run_slow_tests` / `_run_local_tests` / `_run_packaged_tests` /
# `_run_remote_tests` — presumably the original names. Also `parse_flag_from_env`
# is defined above under a different name. Confirm against the upstream
# datasets testing utilities.
__UpperCamelCase = parse_flag_from_env('RUN_SLOW', default=False)
__UpperCamelCase = parse_flag_from_env('RUN_REMOTE', default=False)
__UpperCamelCase = parse_flag_from_env('RUN_LOCAL', default=True)
__UpperCamelCase = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
__UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
__UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
__UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
__UpperCamelCase = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
__UpperCamelCase = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
__UpperCamelCase = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
__UpperCamelCase = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def UpperCamelCase_(test_case):
    """Skip *test_case* unless faiss is importable.

    Fixes: the skipped wrapper was discarded into a throwaway local and the
    function returned the unbound name `test_case`.
    """
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless the `regex` package is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def UpperCamelCase_(test_case):
    """Skip *test_case* unless PyTorch is available per `datasets` config.

    Fixes: the skipped wrapper was discarded into a throwaway local and the
    function returned the unbound name `test_case`.
    """
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless TensorFlow is available per `datasets` config."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless JAX is available per `datasets` config."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless Pillow is available per `datasets` config."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def UpperCamelCase_(test_case):
    """Skip *test_case* unless transformers is importable.

    Fixes: the success branch returned the unbound name `test_case` instead
    of the decorated function's parameter.
    """
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def UpperCamelCase_(model):
    """Decorator factory: skip the test unless the given spacy model loads."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def UpperCamelCase_(test_case):
    """Skip *test_case* unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def UpperCamelCase_(test_case):
    """Skip *test_case* unless slow tests are enabled (RUN_SLOW).

    Fixes: the skipped wrapper was discarded into a throwaway local and the
    function returned the unbound name `test_case`.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless local tests are enabled (RUN_LOCAL)."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless packaged tests are enabled (RUN_PACKAGED)."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def UpperCamelCase_(test_case):
    """Skip *test_case* unless remote tests are enabled (RUN_REMOTE)."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def UpperCamelCase_( *_A :str )-> Optional[int]:
def decorate(cls :Any ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("test" ):
for decorator in decorators:
UpperCamelCase__ = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
class lowerCamelCase__ ( Exception ):
    """Raised by offline simulation when a request has no timeout set and
    would therefore hang indefinitely.

    Fixes: the base class was an undefined name; Exception matches its use as
    a raised error in the offline context manager below.
    """

    pass
class lowerCamelCase__ ( enum.Enum ):
    """Offline-simulation variants consumed by the offline context manager.

    Fixes: the base class was an undefined name (enum is imported above and
    the members are compared with `is` below), and all three members shared
    one name; member names restored to match the usage sites
    (CONNECTION_FAILS / CONNECTION_TIMES_OUT / HF_DATASETS_OFFLINE_SET_TO_1).
    """

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def UpperCamelCase_(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the with-block.

    Depending on *mode*, patches requests so connections fail immediately,
    time out, or sets the datasets offline config flag.
    Fixes: duplicate parameter names (SyntaxError) and unbound locals for the
    saved request function and patched handlers.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout."""
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def UpperCamelCase_(*args, **kwargs):
    """Context manager: chdir into a fresh temporary directory, restoring the
    original working directory on exit.

    Fixes: duplicate star-parameter names (SyntaxError) and the saved
    cwd / temp path being discarded into throwaway locals.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def UpperCamelCase_():
    """Assert that the wrapped block increases pyarrow's allocated memory.

    Fixes: the baseline measurement was bound to a throwaway local while the
    final assertion read the unbound `previous_allocated_memory`.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCamelCase_():
    """Assert that the wrapped block does not increase pyarrow memory.

    Fixes: the baseline measurement was bound to a throwaway local while the
    final assertion read the unbound `previous_allocated_memory`.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCamelCase_(rng1, rng2):
    """True iff two RNGs would produce identical next draws.

    Deep-copies both generators so their state is not consumed.
    Fixes: duplicate parameter names — the original compared a generator
    against itself.
    """
    return deepcopy(rng1).integers(0, 1_00, 10).tolist() == deepcopy(rng2).integers(0, 1_00, 10).tolist()
def UpperCamelCase_(func):
    """Wrap *func* so HTTP 500/502 errors xfail the test instead of failing.

    Fixes: the wrapper's parameters were all corrupted to one duplicate name
    (SyntaxError); other errors are re-raised unchanged.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class lowerCamelCase__ :
    """Record of a finished subprocess: return code plus captured stdout and
    stderr lines.

    Fixes: the three constructor parameters shared one name (SyntaxError) and
    nothing was ever stored on `self`.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def UpperCamelCase_(stream, callback):
    """Forward lines from an async *stream* to *callback* until EOF.

    Fixes: the two parameters shared one name (SyntaxError) and the read line
    was discarded into a throwaway local.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def UpperCamelCase_(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> "_RunOutput":
    """Run *cmd* as an async subprocess, streaming stdout/stderr line by line.

    Returns a _RunOutput with the return code and captured output lines.
    Fixes: duplicate parameter names (SyntaxError) and the process handle /
    capture buffers being discarded into throwaway locals; the return
    annotation is quoted since _RunOutput is defined under another name here.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def UpperCamelCase_(cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True) -> "_RunOutput":
    """Synchronously run *cmd* via the async streaming helper and validate it.

    Raises RuntimeError on a non-zero exit code or when the command produced
    no output at all. Fixes: duplicate parameter names (SyntaxError) and the
    loop/result locals being discarded.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}"""
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")
    return result
def UpperCamelCase_() -> int:
    """Numeric id of the current pytest-xdist worker (0 when not under xdist).

    Fixes: the environment value was discarded into a throwaway local while
    `re.sub` was applied to an undefined name.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def UpperCamelCase_() -> int:
    """A distributed-training port unique per pytest-xdist worker.

    Base port 29500 plus the worker id, so parallel workers don't collide.
    Fixes: both locals were discarded into one throwaway name while the
    return read the unbound `port`/`uniq_delta`.
    """
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 551 |
from __future__ import annotations
import math
import random
from typing import Any
class lowerCamelCase__ :
    """Minimal FIFO queue backed by a list with head/tail cursors.

    Fixes: all five methods shared one name (each shadowed the previous),
    push never advanced `tail`, and pop read an unbound `ret`. Method names
    restored to the conventional is_empty/push/pop/count/print_queue set —
    NOTE(review): no call sites are visible in this chunk; confirm against
    the original module.
    """

    def __init__(self):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self):
        """True when no elements remain between the cursors."""
        return self.head == self.tail

    def push(self, data):
        """Append *data* and advance the tail cursor."""
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        """Remove and return the oldest element."""
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        """Number of elements currently queued."""
        return self.tail - self.head

    def print_queue(self):
        """Debug print of the backing list and the live slice."""
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    """A single AVL-tree node holding data, child links and subtree height.

    Renamed from the mangled class name: `insert_node` constructs
    ``MyNode(data)`` and the rest of the module uses the ``get_*``/``set_*``
    accessors restored here.
    """

    def __init__(self, data: Any):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height = 1  # a fresh node is a leaf: height 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


lowerCamelCase__ = MyNode  # backward-compatible alias for the original (mangled) name
def get_height(node: MyNode | None) -> int:
    """Height of the subtree rooted at *node*; 0 for an empty (None) subtree.

    Renamed from the mangled name: every rotation/insert/delete helper in this
    file calls ``get_height``.
    """
    if node is None:
        return 0
    return node.get_height()
def my_max(a: int, b: int) -> int:
    """Return the larger of *a* and *b*.

    Renamed from the mangled name (callers use ``my_max``); the original
    declared both parameters as ``_A`` — a SyntaxError.
    """
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    """Rotate *node* right and return the new subtree root.

    Used for the left-left imbalance case (see `insert_node`, which calls this
    when the new key is in the left child's left subtree). The parameter was
    mangled to ``_A`` while the body used ``node``; restored.
    NOTE(review): the printed message says "left" — kept byte-for-byte even
    though this performs a right rotation.
    """
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    # recompute heights bottom-up: demoted node first, then the new root
    new_node_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(new_node_height)
    new_root_height = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(new_root_height)
    return ret
def left_rotation(node: MyNode) -> MyNode:
    """Rotate *node* left and return the new subtree root.

    Used for the right-right imbalance case (see `insert_node`/`del_node`).
    The parameter was mangled to ``_A`` while the body used ``node``; restored.
    NOTE(review): the printed message says "right" — kept byte-for-byte even
    though this performs a left rotation.
    """
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    # recompute heights bottom-up: demoted node first, then the new root
    new_node_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(new_node_height)
    new_root_height = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(new_root_height)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    """Left-right double rotation: rotate the left child left, then *node* right.

    Handles the left-right imbalance case. The mangled body left the
    intermediate result unbound and passed ``_A``; bindings restored.
    """
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node: MyNode) -> MyNode:
    """Right-left double rotation: rotate the right child right, then *node* left.

    Handles the right-left imbalance case. The mangled body left the
    intermediate result unbound and passed ``_A``; bindings restored.
    """
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    """Insert *data* into the AVL subtree rooted at *node* and return the new root.

    Recursively inserts, then rebalances with a single or double rotation when
    the height difference between children reaches 2. The mangled original
    declared both parameters as ``_A`` (a SyntaxError) and never rebound
    ``node`` after rotating; both restored.
    """
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    new_height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(new_height)
    return node
def get_right_most(root: MyNode) -> Any:
    """Return the data of the right-most (maximum) node of the subtree *root*.

    Name chosen by symmetry with `get_left_most` — no call site is visible in
    this file, TODO confirm. The mangled body used unbound
    ``root``/``right_child``; bindings restored.
    """
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root: MyNode) -> Any:
    """Return the data of the left-most (minimum) node of the subtree *root*.

    Used by `del_node` to find the in-order successor in the right subtree.
    Renamed from the mangled name (the call site uses ``get_left_most``); the
    unbound loop variables are restored.
    """
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    """Delete *data* from the AVL subtree rooted at *root*; return the new root.

    Two-child deletion replaces the root's data with the left-most value of
    the right subtree and deletes that value from the right subtree. After the
    structural change the subtree is rebalanced using the child references
    captured before modification (matching the original statement order).
    Mangled parameter/argument names restored; the successor is looked up in
    ``right_child`` — presumed from the surrounding logic, confirm.
    """
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    # rebalance using the pre-deletion child snapshots
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    new_height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(new_height)
    return root
class AVLtree:
    """An auto-balancing binary search tree.

    Renamed from the mangled class name: the ``__main__`` section instantiates
    ``AVLtree()`` and calls ``insert``/``del_node``. The original never bound
    ``self.root`` and all methods shared one mangled name; restored here from
    the usages inside `__str__` and the module-level helpers.
    """

    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self) -> int:
        # module-level get_height handles the empty (None) root
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversale, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                # placeholder for a missing child; keep the level structure
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


lowerCamelCase__ = AVLtree  # backward-compatible alias for the original (mangled) name
def UpperCamelCase_( )-> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
    # Demo: insert 0..9 in random order (printing the tree after each insert),
    # then delete them in a new random order. The mangled original bound the
    # tree/list to throwaway names and ran the demo at import time; both fixed.
    _test()
    t = AVLtree()
    lst = list(range(1_0))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
| 551 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the GLUE/MRPC example below.
# NOTE(review): both assignments share one (mangled) name, so the 16 is
# immediately overwritten by 32 — presumably these were two distinct constants
# (e.g. a train and an eval batch size); confirm against the original script.
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build train/eval `DataLoader`s for GLUE/MRPC tokenized with *model_name*'s tokenizer.

    Renamed from the mangled name: `training_function` calls ``get_dataloaders``.
    The original declared every parameter as ``__lowerCamelCase`` (a
    SyntaxError) and never bound ``datasets``/``tokenized_datasets``; restored.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=1_28, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    # NOTE(review): eval shuffle/batch size were mangled; False / 32 follow the
    # standard accelerate GLUE example — confirm.
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=32
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Evaluate *model* on *eval_dataloader* with *metric* and return the accuracy.

    Renamed from the mangled name: `training_function` calls
    ``evaluation_loop``. Parameter names/order restored from the body's usages
    (accelerator/model/eval_dataloader/metric) — order presumed, confirm.
    Deduplicates the last distributed batch so each sample is counted once.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    """Train (or resume) BERT on GLUE/MRPC, checkpointing per epoch.

    Renamed from the mangled name: ``main`` calls ``training_function(config, args)``.
    When ``args.resume_from_checkpoint`` is set, the accelerator state is
    reloaded and the saved per-epoch JSON state is asserted against the live
    accuracy / learning rates, then the function returns (resume-verification
    mode). Otherwise trains from ``starting_epoch`` to ``ending_epoch``,
    saving accelerator state and a ``state_{epoch}.json`` summary each epoch.
    All local bindings were mangled away in the original; reconstructed from
    the surviving attribute/key usages — review before relying on details.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer: use the dummy optimizer when DeepSpeed's config provides one
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler (dummy one when DeepSpeed's config provides a scheduler)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # checkpoint dirs are named "epoch_{N}"; extract N and resume at N + 1
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        # persist the values the resume path asserts against
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    """Parse command-line arguments and launch `training_function`.

    Renamed from the mangled name: the ``__main__`` guard calls ``main()``.
    The original bound the parser to a throwaway name and then used the
    unbound ``parser``/``args``/``config``; restored.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 122 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowerCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
# Language-code token ids for facebook/m2m100_418M; the tests below reference
# them as EN_CODE / FR_CODE, but the mangled original assigned both values to
# one name. EN == 128_022 is confirmed by test_language_codes.
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for `MaMaaaTokenizer` driven by the common tokenizer mixin.

    Reconstructed from a mangled original: the base class was the undefined
    ``A_`` (restored to the imported `TokenizerTesterMixin`), every test method
    shared one mangled name (breaking unittest discovery), and the class was
    renamed from ``__A`` because a second class in this module used the same
    name and shadowed it. Mixin flag names are presumed from the mixin's
    conventions — confirm.
    """

    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "\u2581This", "\u2581is", "\u2581a", "\u2581t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            # fixture path computed inline (the module-level constant was mangled away)
            copyfile(get_tests_dir("fixtures/test_sentencepiece.model"), save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["\u2581This", "\u2581is", "\u2581a", "\u2581t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["\u2581This", "\u2581is", "\u2581a", "\u2581t", "est"])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizationIntegrationTest(unittest.TestCase):
    """Slow integration tests against the real facebook/m2m100_418M checkpoint.

    Reconstructed from a mangled original: the class was renamed from ``__A``
    (which collided with the unit-test class above) and every test method
    shared one mangled name. Attribute names are restored from their usages
    (``cls.checkpoint_name``, ``self.src_text``, ``self.tgt_text``,
    ``self.expected_src_tokens``). Language-code ids are inlined as literals
    because the module-level EN_CODE/FR_CODE constants were mangled away.
    """

    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [128_022, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]  # first id is EN_CODE
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        # NOTE(review): the mangled original assigned the bare value 1 here;
        # restored as the pad token id — confirm.
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128_006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128_022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128_076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128_063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(128_028, self.tokenizer.all_special_ids)  # 128_028 == FR_CODE
        # fmt: off
        generated_ids = [128_028, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == 128_022  # EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == 128_028  # FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, 128_028]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128_022, 58, 4_183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128_006,
            },
        )
| 122 | 1 |
import math


def prime_sieve(n: int) -> list[int]:
    """Return all primes strictly below *n* (requires n >= 3).

    Sieve of Eratosthenes specialised for odd candidates: for each odd i up to
    sqrt(n), multiples of i starting at 2*i are struck out, and only odd
    indices (plus the constant 2) are collected. Renamed from the mangled
    name: `solution` calls ``prime_sieve``. The original assigned the
    0/1/2 base cases to a dead local instead of the sieve array; fixed.
    """
    is_prime = [True] * n
    is_prime[0] = False  # 0 and 1 are not prime
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 9999_6666_3333) -> int:
    """Sum the semidivisible numbers not exceeding *limit* (Project Euler 234).

    For each pair of consecutive primes (lps, ups) bracketing a range
    [lps^2, ups^2), numbers divisible by exactly one of lps/ups are added;
    numbers divisible by both are subtracted twice since both passes added
    them. Renamed from the mangled name: the ``__main__`` guard calls
    ``solution()``; the loop variables (``last_prime``, ``current``, bounds)
    were unbound in the original and are restored here.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum


if __name__ == "__main__":
    print(solution())
| 84 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 694 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Map of pretrained checkpoint name -> config URL.
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

# Module-level logger; the config class below emits warnings through it.
logger = logging.get_logger(__name__)
class __lowerCAmelCase(PretrainedConfig):
    """MaskFormer-style configuration.

    Holds a backbone config (Swin by default) and a transformer decoder
    config (DETR by default) plus the loss / feature-size hyperparameters.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    # Backbone / decoder model types accepted by this config.
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirrors the decoder's attention geometry for downstream code.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        """Build a config directly from already-instantiated sub-configs."""
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 721 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
# Module-level logger; the config class below calls `logger.info(...)`.
logger = logging.get_logger(__name__)
class __lowerCAmelCase(PretrainedConfig):
    """UPerNet semantic-segmentation configuration.

    Wraps a backbone config (default: ResNet with four output stages) plus
    the decode-head and auxiliary-head hyperparameters.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # read-only default shared across calls, matches upstream API
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self) -> str:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 629 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public symbols it provides.
# Optional backends are only registered when their dependency is installed.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves symbols on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger used by the tokenizer's error paths.
logger = logging.get_logger(__name__)

# Names of the files a saved vocabulary consists of.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

# Pretrained checkpoint name -> hosted vocab file.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

# Maximum input sizes per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class A(PreTrainedTokenizer):
    """Character-level tokenizer: one token per character of the input text."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        """Load the JSON vocab from `vocab_file` and build the inverse map."""
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # id -> token, inverse of the loaded vocab
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the vocab merged with any added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split `text` into a list of single characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a token to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocab as JSON into `save_directory`; return the file path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 48 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    """Output container returned by the UNet below.

    Attributes:
        sample: the denoised model output, NCHW-ordered `jnp.ndarray`.
    """

    sample: jnp.ndarray
@flax_register_to_config
class UpperCamelCase(nn.Module, FlaxModelMixin, ConfigMixin):
    """Conditional 2D UNet implemented with Flax.

    Takes a noisy NCHW `sample`, `timesteps`, and `encoder_hidden_states`
    (cross-attention conditioning) and predicts a sample-shaped output.
    Built from the down/mid/up blocks imported at the top of the file.
    """

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng) -> FrozenDict:
        """Initialize parameters with dummy inputs; returns the params tree."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        """Construct conv-in, time embedding, down/mid/up blocks, and conv-out."""
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        # broadcast scalar settings to one entry per down block
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        """Run the UNet; returns `FlaxUNetaDConditionOutput` (or a 1-tuple)."""
        # 1. time: normalize `timesteps` to a rank-1 array
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process: NCHW -> NHWC for Flax convolutions
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up: consume the residual stack block by block
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process, back to NCHW
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
| 295 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Backend choices offered during interactive config; index order must match
# the lookup in `_convert_dynamo_backend` below.
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt until the user's answer converts cleanly.

    Returns `default` on an empty answer (when a default is given), otherwise
    `convert_value(answer)` — or the raw answer when no converter is supplied.
    Re-asks on any conversion error, printing `error_message` if provided.
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            # Conversion failed: optionally explain, then loop and re-ask.
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default=0):
    """Show a bullet menu of `options` and convert the chosen entry.

    `options` default is never mutated (read-only), so the shared-list
    default is safe here.
    """
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    """Map a numeric menu choice to a `ComputeEnvironment` member."""
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    """Map a numeric menu choice to a `DistributedType` member."""
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    """Map a numeric menu choice to a `DynamoBackend` value string."""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    """Map a numeric menu choice to a `PrecisionType` member."""
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    """Map a numeric menu choice to a `SageMakerDistributedType` member."""
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _lowerCAmelCase ( lowerCamelCase__ : List[Any] ) -> Optional[Any]:
return {"yes": True, "no": False}[value.lower()]
class UpperCamelCase(argparse.RawDescriptionHelpFormatter):
    """Help formatter that hides the `<command> [<args>]` placeholder in usage lines.

    The override must be named `_format_usage` so argparse actually calls it.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 295 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: maps submodule name -> public symbols it provides.
# Vision/torch-only entries are registered only when the backend is installed.
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves symbols on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 371 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

# Registries populated by `_register_formatter` / `_register_unavailable_formatter`:
#   format type -> Formatter class, alias -> canonical type, alias -> error to raise.
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls, format_type, aliases=None) -> None:
    """Register `formatter_cls` under `format_type` and each alias.

    Warns (but proceeds) when overwriting an existing type or alias.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error, format_type, aliases=None) -> None:
    """Record the error to raise when `format_type` (or an alias) is requested
    but its backend dependency is not installed."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias (e.g. 'np') to its canonical format type ('numpy');
    unknown names are returned unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> "Formatter":
    """Instantiate the Formatter registered for `format_type` (alias-aware).

    Raises the recorded dependency error for known-but-unavailable types,
    or ValueError for unknown types.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
| 371 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=7 , _a=3 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , _a=True , _a=1 / 255 , _a=True , ):
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
a__ = parent
a__ = batch_size
a__ = num_channels
a__ = min_resolution
a__ = max_resolution
a__ = do_resize
a__ = size
a__ = do_normalize
a__ = image_mean
a__ = image_std
a__ = do_rescale
a__ = rescale_factor
a__ = do_pad
def lowercase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase__ ( self , _a , _a=False ):
"""simple docstring"""
if not batched:
a__ = image_inputs[0]
if isinstance(_a , Image.Image ):
a__ , a__ = image.size
else:
a__ , a__ = image.shape[1], image.shape[2]
if w < h:
a__ = int(self.size['shortest_edge'] * h / w )
a__ = self.size['shortest_edge']
elif w > h:
a__ = self.size['shortest_edge']
a__ = int(self.size['shortest_edge'] * w / h )
else:
a__ = self.size['shortest_edge']
a__ = self.size['shortest_edge']
else:
a__ = []
for image in image_inputs:
a__ , a__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a__ = max(_a , key=lambda _a : item[0] )[0]
a__ = max(_a , key=lambda _a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _UpperCamelCase ( _A , unittest.TestCase ):
    '''Tests for the DeformableDetr image processor: config round-trips,
    PIL / numpy / torch batching behaviour, and slow integration checks
    against reference COCO detection and panoptic annotations.

    NOTE(review): obfuscation artifacts in this class — every test method is
    named ``lowercase__`` (later defs shadow earlier ones), results are bound
    to the throwaway local ``a__`` instead of named variables, and ``_a``
    stands in for the expected value in assertions.  Restore the original
    identifiers before relying on these tests.
    '''
    SCREAMING_SNAKE_CASE:Dict = DeformableDetrImageProcessor if is_vision_available() else None
    def lowercase__ ( self ):
        """Create the shared image-processor tester fixture (intended as setUp)."""
        a__ = DeformableDetrImageProcessingTester(self )
    @property
    def lowercase__ ( self ):
        """Processor config dict produced by the tester fixture."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowercase__ ( self ):
        """The processor exposes all expected configuration attributes."""
        a__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_a , 'image_mean' ) )
        self.assertTrue(hasattr(_a , 'image_std' ) )
        self.assertTrue(hasattr(_a , 'do_normalize' ) )
        self.assertTrue(hasattr(_a , 'do_resize' ) )
        self.assertTrue(hasattr(_a , 'do_rescale' ) )
        self.assertTrue(hasattr(_a , 'do_pad' ) )
        self.assertTrue(hasattr(_a , 'size' ) )
    def lowercase__ ( self ):
        """from_dict uses the defaults and honours size / padding overrides."""
        a__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
        self.assertEqual(image_processor.do_pad , _a )
        a__ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_a )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , _a )
    def lowercase__ ( self ):
        """Intentionally empty placeholder."""
        pass
    def lowercase__ ( self ):
        """PIL inputs: single image and batch produce the expected padded shapes."""
        # Initialize image_processing
        a__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , Image.Image )
        # Test not batched input
        a__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        a__ , a__ = self.image_processor_tester.get_expected_values(_a )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a__ , a__ = self.image_processor_tester.get_expected_values(_a , batched=_a )
        a__ = image_processing(_a , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def lowercase__ ( self ):
        """Numpy inputs: single image and batch produce the expected padded shapes."""
        # Initialize image_processing
        a__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , np.ndarray )
        # Test not batched input
        a__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        a__ , a__ = self.image_processor_tester.get_expected_values(_a )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a__ = image_processing(_a , return_tensors='pt' ).pixel_values
        a__ , a__ = self.image_processor_tester.get_expected_values(_a , batched=_a )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def lowercase__ ( self ):
        """Torch inputs: single image and batch produce the expected padded shapes."""
        # Initialize image_processing
        a__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , torch.Tensor )
        # Test not batched input
        a__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        a__ , a__ = self.image_processor_tester.get_expected_values(_a )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        a__ = image_processing(_a , return_tensors='pt' ).pixel_values
        a__ , a__ = self.image_processor_tester.get_expected_values(_a , batched=_a )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def lowercase__ ( self ):
        """Integration: COCO detection annotations produce reference tensors."""
        # prepare image and target
        a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            a__ = json.loads(f.read() )
        a__ = {'image_id': 3_9769, 'annotations': target}
        # encode them
        a__ = DeformableDetrImageProcessor()
        a__ = image_processing(images=_a , annotations=_a , return_tensors='pt' )
        # verify pixel values
        a__ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , _a )
        a__ = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _a , atol=1e-4 ) )
        # verify area
        a__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _a ) )
        # verify boxes
        a__ = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , _a )
        a__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _a , atol=1e-3 ) )
        # verify image_id
        a__ = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _a ) )
        # verify is_crowd
        a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _a ) )
        # verify class_labels
        a__ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _a ) )
        # verify orig_size
        a__ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _a ) )
        # verify size
        a__ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _a ) )
    @slow
    def lowercase__ ( self ):
        """Integration: COCO panoptic annotations produce reference tensors and masks."""
        # prepare image, target and masks_path
        a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            a__ = json.loads(f.read() )
        a__ = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
        a__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        a__ = DeformableDetrImageProcessor(format='coco_panoptic' )
        a__ = image_processing(images=_a , annotations=_a , masks_path=_a , return_tensors='pt' )
        # verify pixel values
        a__ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , _a )
        a__ = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _a , atol=1e-4 ) )
        # verify area
        a__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _a ) )
        # verify boxes
        a__ = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , _a )
        a__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _a , atol=1e-3 ) )
        # verify image_id
        a__ = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _a ) )
        # verify is_crowd
        a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _a ) )
        # verify class_labels
        a__ = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _a ) )
        # verify masks
        a__ = 82_2873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _a )
        # verify orig_size
        a__ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _a ) )
        # verify size
        a__ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _a ) )
| 126 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): these two batch-size constants were both obfuscated to `__A`
# (the second overwrites the first); in the upstream accelerate example they
# are MAX_GPU_BATCH_SIZE = 16 and EVAL_BATCH_SIZE = 32 — confirm before use.
__A = 16
__A = 32
def lowerCAmelCase_ ( a ) -> int:
    """Convert a byte count to whole mebibytes (2**20 bytes).

    Fixes the obfuscated body which referenced an undefined name `x` instead
    of the parameter.
    """
    return int(a / 2**20 )
class _UpperCamelCase :
    '''Context manager that tracks CUDA memory allocated inside a ``with``
    block, recording the delta (`used`) and peak (`peaked`) in MiB.

    Fixes the obfuscated original, which bound every measurement to a
    throwaway local ``a__`` so ``self.begin``/``self.end``/``self.peak``
    never existed and readers of ``tracemalloc.peaked`` crashed.
    '''

    def __enter__( self ):
        """Snapshot currently allocated CUDA memory and reset the peak gauge."""
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__( self , *exc ):
        """Record end/peak usage and derive the MiB deltas."""
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        # NOTE(review): `bamb` is presumably the bytes->MiB helper defined
        # above (its def was obfuscated to `lowerCAmelCase_`) — restore the
        # name when de-obfuscating this file.
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCAmelCase_ ( accelerator : Accelerator , batch_size : int = 16 , model_name : str = "bert-base-cased" , n_train : int = 320 , n_val : int = 160 , ):
    """Build train/eval dataloaders over a GLUE MRPC slice.

    Fixes the obfuscated signature, in which every parameter was named ``a``
    (a SyntaxError); parameter order and defaults match the original call
    site ``get_dataloaders(accelerator, batch_size, model_name, n_train, n_val)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        'glue' , 'mrpc' , split={'train': f'''train[:{n_train}]''', 'validation': f'''validation[:{n_val}]'''} )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def lowerCAmelCase_ ( config , args ):
    """Train the model while tracking peak GPU memory usage per epoch.

    config: dict with 'lr', 'num_epochs', 'seed', 'batch_size'.
    args:   parsed CLI namespace (model_name_or_path, n_train, n_val,
            peak_memory_upper_bound, output_dir).

    Fixes the obfuscated signature (both parameters were named ``a`` — a
    SyntaxError) and the locals mangled to ``a__``, including the lost
    assignment into ``train_total_peak_memory``.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path
    set_seed(seed )
    # NOTE(review): `get_dataloaders` is the dataloader builder defined above
    # (its def was obfuscated to `lowerCAmelCase_`).
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer: DeepSpeed's DummyOptim when the DeepSpeed config owns the optimizer.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        # NOTE(review): `TorchTracemalloc` is the CUDA-memory context manager
        # defined above (obfuscated to `_UpperCamelCase`).
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f'''epoch-{epoch}'''] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
            json.dump(train_total_peak_memory , f )
def lowerCAmelCase_ ( ):
    """Parse CLI arguments and launch the peak-GPU-memory tracking run.

    Fixes the obfuscated ``type=a`` / ``required=a`` argparse kwargs (which
    referenced an undefined name) and the ``training_function(a , a)`` call.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--peak_memory_upper_bound' , type=float , default=None , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
    parser.add_argument(
        '--n_train' , type=int , default=320 , help='Number of training examples to use.' , )
    parser.add_argument(
        '--n_val' , type=int , default=160 , help='Number of validation examples to use.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=1 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    # NOTE(review): `training_function` is the trainer defined above (its def
    # was obfuscated to `lowerCAmelCase_`) — restore the name when de-obfuscating.
    training_function(config , args )


if __name__ == "__main__":
    # NOTE(review): `main` refers to this argument-parsing entry point.
    main()
| 126 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase__ ( unittest.TestCase ):
    '''Slow integration test for FlaxStableDiffusionInpaintPipeline against a
    reference output slice.

    NOTE(review): obfuscation artifacts — both methods are named
    ``lowerCamelCase_`` (the second shadows the first); the first clearly
    implements ``tearDown`` (it calls ``super().tearDown()``), and results are
    bound to ``_UpperCAmelCase`` placeholders.  Restore the original names
    before relying on this test.
    '''
    def lowerCamelCase_ ( self ) -> Optional[int]:
        """Free VRAM after each test (intended as tearDown)."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def lowerCamelCase_ ( self ) -> Optional[Any]:
        """Run sharded inpainting across devices and check a pixel slice."""
        _UpperCAmelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        _UpperCAmelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        _UpperCAmelCase = 'xvjiarui/stable-diffusion-2-inpainting'
        _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case )
        _UpperCAmelCase = 'Face of a yellow cat, high resolution, sitting on a park bench'
        _UpperCAmelCase = jax.random.PRNGKey(0 )
        _UpperCAmelCase = 50
        _UpperCAmelCase = jax.device_count()
        _UpperCAmelCase = num_samples * [prompt]
        _UpperCAmelCase = num_samples * [init_image]
        _UpperCAmelCase = num_samples * [mask_image]
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pipeline.prepare_inputs(snake_case , snake_case , snake_case )
        # shard inputs and rng
        _UpperCAmelCase = replicate(snake_case )
        _UpperCAmelCase = jax.random.split(snake_case , jax.device_count() )
        _UpperCAmelCase = shard(snake_case )
        _UpperCAmelCase = shard(snake_case )
        _UpperCAmelCase = shard(snake_case )
        _UpperCAmelCase = pipeline(
            snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case )
        _UpperCAmelCase = output.images.reshape(snake_case , 512 , 512 , 3 )
        _UpperCAmelCase = images[0, 253:256, 253:256, -1]
        _UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _UpperCAmelCase = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 573 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
# Module-level logger (transformers logging wrapper); unused below but kept
# for parity with the upstream file.
lowercase = logging.get_logger(__name__)
class lowercase__ ( A ):
    '''Deprecated alias that forwards everything to the image processor.

    NOTE(review): the base class ``A`` is an obfuscated reference to
    ``GLPNImageProcessor`` (imported above) — confirm when de-obfuscating.
    Fixes the obfuscated signature ``*snake_case, **snake_case`` (duplicate
    argument name — a SyntaxError) and restores ``FutureWarning`` as the
    warning category, the standard transformers deprecation pattern.
    '''

    def __init__( self , *args , **kwargs ) -> None:
        # Warn once about the deprecation, then behave exactly like the parent.
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 573 | 1 |
from __future__ import annotations
from collections.abc import Generator
def _snake_case () -> Generator[int, None, None]:
_lowercase ={}
_lowercase =2
while True:
_lowercase =factor_map.pop(_snake_case , _snake_case)
if factor:
_lowercase =factor + prime
while x in factor_map:
x += factor
_lowercase =factor
else:
_lowercase =prime
yield prime
prime += 1
def _snake_case (_snake_case : float = 1E10) -> int:
_lowercase =sieve()
_lowercase =1
while True:
_lowercase =next(_snake_case)
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(_snake_case)
n += 2
if __name__ == "__main__":
print(solution())
| 557 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
# Module-level logger (transformers logging wrapper); unused below but kept
# for parity with the upstream file.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
    """Configuration object for bitsandbytes 8-bit / 4-bit quantization
    (the transformers ``BitsAndBytesConfig``): validation, serialization to
    and from dict/JSON, and quantization-method introspection.

    NOTE(review): heavy obfuscation artifacts in this class —
    * every ``__init__`` parameter is named ``snake_case`` (duplicate
      argument names are a SyntaxError);
    * the distinct ``load_in_8bit``/``load_in_4bit`` attributes were both
      collapsed into ``load_in_abit``, so expressions such as
      ``self.load_in_abit or self.load_in_abit`` are ambiguous and cannot be
      disambiguated from this file alone;
    * method names collapsed to ``UpperCamelCase__`` (later defs shadow
      earlier ones).
    Restore against upstream ``transformers`` before relying on this code.
    """
    def __init__( self :Tuple, snake_case :Tuple=False, snake_case :Optional[int]=False, snake_case :Optional[Any]=6.0, snake_case :Optional[Any]=None, snake_case :Optional[Any]=False, snake_case :List[str]=False, snake_case :Optional[int]=None, snake_case :List[Any]="fp4", snake_case :Dict=False, **snake_case :Optional[int], ):
        """Store quantization settings, defaulting the 4-bit compute dtype to float32, then validate."""
        _lowercase =load_in_abit
        _lowercase =load_in_abit
        _lowercase =llm_inta_threshold
        _lowercase =llm_inta_skip_modules
        _lowercase =llm_inta_enable_fpaa_cpu_offload
        _lowercase =llm_inta_has_fpaa_weight
        _lowercase =bnb_abit_quant_type
        _lowercase =bnb_abit_use_double_quant
        if bnb_abit_compute_dtype is None:
            _lowercase =torch.floataa
        elif isinstance(snake_case, snake_case):
            # string dtype names are resolved via getattr(torch, name)
            _lowercase =getattr(snake_case, snake_case)
        elif isinstance(snake_case, torch.dtype):
            _lowercase =bnb_abit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')
        self.post_init()
    def UpperCamelCase__ ( self :int):
        """Validate attribute types and the installed bitsandbytes version."""
        if not isinstance(self.llm_inta_threshold, snake_case):
            raise ValueError('llm_int8_threshold must be a float')
        if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules, snake_case):
            raise ValueError('llm_int8_skip_modules must be a list of strings')
        if not isinstance(self.llm_inta_enable_fpaa_cpu_offload, snake_case):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')
        if not isinstance(self.llm_inta_has_fpaa_weight, snake_case):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')
        if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')
        if not isinstance(self.bnb_abit_quant_type, snake_case):
            raise ValueError('bnb_4bit_quant_type must be a string')
        if not isinstance(self.bnb_abit_use_double_quant, snake_case):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')
        if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0'):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')
    def UpperCamelCase__ ( self :int):
        """True when any quantization mode (8-bit or 4-bit) is enabled."""
        return self.load_in_abit or self.load_in_abit
    def UpperCamelCase__ ( self :Union[str, Any]):
        """Return the active quantization method name, or None when disabled."""
        if self.load_in_abit:
            return "llm_int8"
        elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def UpperCamelCase__ ( cls :Optional[int], snake_case :List[Any], snake_case :List[Any], **snake_case :Tuple):
        """Alternate constructor from a dict; consumes matching kwargs and
        optionally returns the leftovers (from_dict-style)."""
        _lowercase =cls(**snake_case)
        _lowercase =[]
        for key, value in kwargs.items():
            if hasattr(snake_case, snake_case):
                setattr(snake_case, snake_case, snake_case)
                to_remove.append(snake_case)
        for key in to_remove:
            kwargs.pop(snake_case, snake_case)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def UpperCamelCase__ ( self :Any, snake_case :Union[str, os.PathLike]):
        """Serialize the config to a JSON file (pretty-printed, sorted keys)."""
        with open(snake_case, 'w', encoding='utf-8') as writer:
            _lowercase =self.to_dict()
            _lowercase =json.dumps(snake_case, indent=2, sort_keys=snake_case) + '\n'
            writer.write(snake_case)
    def UpperCamelCase__ ( self :List[Any]):
        """Return a dict copy with the compute dtype rendered as its short name."""
        _lowercase =copy.deepcopy(self.__dict__)
        # "torch.float32" -> "float32"
        _lowercase =str(output['bnb_4bit_compute_dtype']).split('.')[1]
        return output
    def __repr__( self :List[Any]):
        """Class name plus the JSON rendering of the config."""
        return f'''{self.__class__.__name__} {self.to_json_string()}'''
    def UpperCamelCase__ ( self :Dict, snake_case :bool = True):
        """Return the config as a JSON string; with use_diff, only non-default fields."""
        if use_diff is True:
            _lowercase =self.to_diff_dict()
        else:
            _lowercase =self.to_dict()
        return json.dumps(snake_case, indent=2, sort_keys=snake_case) + "\n"
    def UpperCamelCase__ ( self :str):
        """Return only the fields that differ from a default-constructed config."""
        _lowercase =self.to_dict()
        # get the default config dict
        _lowercase =BitsAndBytesConfig().to_dict()
        _lowercase ={}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                _lowercase =value
        return serializable_config_dict
| 557 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
# Module-level logger and the static tokenizer metadata tables consumed by
# the fast DistilBERT tokenizer below: resolved file names, per-checkpoint
# vocab/tokenizer URLs, maximum input sizes, and per-checkpoint init kwargs.
A__ : List[str] = logging.get_logger(__name__)
A__ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download locations of the vocabulary and pre-built tokenizer files for each
# published DistilBERT checkpoint.
A__ : Any = {
    """vocab_file""": {
        """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
        """distilbert-base-uncased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
        ),
        """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
        """distilbert-base-cased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
        ),
        """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
        """distilbert-base-multilingual-cased""": (
            """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
        """distilbert-base-uncased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
        """distilbert-base-cased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-german-cased""": (
            """https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-multilingual-cased""": (
            """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum sequence length (positional-embedding size) per checkpoint.
A__ : str = {
    """distilbert-base-uncased""": 512,
    """distilbert-base-uncased-distilled-squad""": 512,
    """distilbert-base-cased""": 512,
    """distilbert-base-cased-distilled-squad""": 512,
    """distilbert-base-german-cased""": 512,
    """distilbert-base-multilingual-cased""": 512,
}
# Default constructor kwargs (casing behaviour) per checkpoint.
A__ : Any = {
    """distilbert-base-uncased""": {"""do_lower_case""": True},
    """distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
    """distilbert-base-cased""": {"""do_lower_case""": False},
    """distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
    """distilbert-base-german-cased""": {"""do_lower_case""": False},
    """distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase_ (_UpperCAmelCase ):
    """Fast (tokenizers-backed) DistilBERT tokenizer: rebuilds the backend
    normalizer when casing options differ, and implements the BERT-style
    special-token / token-type-id / vocabulary-saving hooks.

    NOTE(review): obfuscation artifacts — every ``__init__`` parameter is
    named ``SCREAMING_SNAKE_CASE_`` (duplicate argument names are a
    SyntaxError), and the two ``token_ids`` parameters of the pair-input
    methods were both collapsed into ``token_ids_a``.  Restore against the
    upstream ``transformers`` implementation before relying on this code.
    """
    lowerCamelCase : List[str] = VOCAB_FILES_NAMES
    lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase : Any = PRETRAINED_INIT_CONFIGURATION
    lowerCamelCase : Any = ['input_ids', 'attention_mask']
    lowerCamelCase : Optional[Any] = DistilBertTokenizer
    def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Union[str, Any]:
        """Initialise the fast tokenizer and re-create the backend normalizer
        if lowercase / strip_accents / chinese-chars handling changed."""
        super().__init__(
            SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        __lowerCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
            or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
        ):
            # rebuild the normalizer with the requested casing behaviour
            __lowerCamelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('type' ) )
            __lowerCamelCase : Optional[Any] = do_lower_case
            __lowerCamelCase : Dict = strip_accents
            __lowerCamelCase : Optional[Any] = tokenize_chinese_chars
            __lowerCamelCase : Tuple = normalizer_class(**SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Optional[int] = do_lower_case
    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Any:
        """Build [CLS] A [SEP] (and optionally B [SEP]) input id sequences."""
        __lowerCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        """Token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        __lowerCamelCase : Dict = [self.sep_token_id]
        __lowerCamelCase : str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
        """Save the backend vocabulary files into the given directory."""
        __lowerCamelCase : List[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
        return tuple(SCREAMING_SNAKE_CASE_ )
| 13 | import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the tiny SentencePiece model fixture used by the Reformer tokenizer tests.
__SCREAMING_SNAKE_CASE : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = ReformerTokenizer
_lowerCamelCase = ReformerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : int = "<s>"
_snake_case : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 1_000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : int = "I was born in 92000, and this is falsé."
_snake_case : Tuple = tokenizer.tokenize(lowercase_ )
_snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : List[Any] = tokenizer.encode(lowercase_ )
_snake_case : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
_snake_case : List[str] = "This is a simple input"
_snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
_snake_case : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
_snake_case : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ):
_snake_case : int = "Hello World!"
_snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : str = " ".join(lowercase_ )
_snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
_snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_snake_case : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
_snake_case : List[str] = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , ) | 670 | 0 |
'''simple docstring'''
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    """Tool that reads English text out loud with SpeechT5 and a HiFi-GAN vocoder."""

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        # Fall back to the default vocoder checkpoint when none was configured.
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        """Tokenize the text and resolve speaker embeddings (default xvector from CMU Arctic)."""
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            # Index 7305 is a fixed reference speaker; shape becomes (1, embedding_dim).
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        # Run the vocoder and hand back a detached CPU waveform tensor.
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 721 |
"""Simulation of a two-input NAND logic gate."""


def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of two binary inputs: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exhaustively check the NAND truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 490 | 0 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a : Any = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] , *__lowercase : List[Any] , **__lowercase : List[Any] ) -> None:
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , __lowercase , )
super().__init__(*__lowercase , **__lowercase )
| 63 |
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Return True if no already-colored neighbour of the vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    """Backtracking helper: try to color vertices from `index` onward.

    Mutates `colored_vertices` in place; returns True when a full coloring exists.
    """
    # Base Case: every vertex has been assigned a color.
    if index == len(graph):
        return True

    # Recursive Step
    for candidate in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, candidate):
            # Color current vertex
            colored_vertices[index] = candidate
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list, max_colors: int) -> list:
    """Return a valid coloring of `graph` (adjacency matrix) with at most
    `max_colors` colors, or an empty list when none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []


# Backward-compatible alias for the previous (obfuscated) entry-point name.
A__ = color
| 304 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

# pandas.read_csv kwargs that must be dropped when left at their default,
# grouped by reason: no stable default, deprecated in pandas, or only
# available from pandas 1.3 / 2.0 onward.
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV; fields mirror the ``pandas.read_csv`` keyword arguments."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter`/`column_names` are user-facing aliases for pandas'
        # `sep`/`names`; mirror them so only the pandas names are forwarded.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Kwargs dict to forward to ``pandas.read_csv``, pruned of defaults
        that pandas would reject or that the installed pandas does not know."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads CSV files with pandas and yields pyarrow tables."""

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict data_files; dict keys become split names."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a raw table to the configured features' schema."""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 127 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: module name -> public names, extended when torch is available.
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 127 | 1 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """Folder-based builder that loads audio files and optional classification labels."""

    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


# Audio file extensions recognized by the builder.
AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 5 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """BigBird tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; reload it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text) -> List[str]:
        """Tokenize a string into sentencepiece tokens."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Mask with 1 at special-token positions and 0 at sequence-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Token-type ids: 0 for sequence A (and its specials), 1 for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # One string column per language, in sorted order for a stable schema.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        """Flatten the Translation feature into a dictionary of Value features."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """Feature for translations where the set of languages may vary per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Deduplicate and sort the declared languages; track how many there are.
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Encode a language->text(s) dict into parallel language/translation tuples."""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the feature into Sequence-of-Value features."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 552 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
# Shared RNG so unseeded calls are still reproducible within one process;
# the name `global_rng` is what floats_list() below reads.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats in [0, scale) with the given 2-D `shape`.

    `rng` defaults to the module-level `global_rng`; `name` is accepted for
    call-site compatibility and unused. (Fixes: duplicate parameter names,
    the missing `rng` default, and the def name not matching its call sites.)
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    """Holds hyper-parameters and input builders for the feature-extractor tests.

    Fixes: the class was named so its call site (`SpeechaTextFeatureExtractionTester(self)`)
    could not find it, `__init__` declared duplicate parameter names and stored
    everything in throwaway locals instead of `self`, and both helper methods
    shared one name (shadowing the first).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths so the batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to build the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of float inputs, equal-length or increasing in size."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class a__ ( snake_case__ , unittest.TestCase ):
    """Tests for the Speech2Text feature extractor.

    Fixes: the class attribute was named `_a` while the mixin and the tests
    read `feature_extraction_class`, the unittest hook was not named `setUp`
    (so `self.feat_extract_tester` was never created), helper methods did not
    carry the names their call sites use, test methods lacked the `test_`
    prefix required for discovery, and all locals were corrupted.
    """

    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        # unittest hook — must be named setUp so feat_extract_tester exists in every test.
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        # Per-feature mean ~ 0 and variance ~ 1 after utterance-level normalization.
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            # NOTE(review): index [0] below is what the source had; [1] looks intended — confirm upstream.
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # NOTE(review): dtypes were corrupted in the source; float64 input / float32 output matches upstream — confirm.
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
| 552 | 1 |
'''simple docstring'''
# Multiply by speed_chart[unit] to convert FROM that unit into km/h.
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

# Multiply by speed_chart_inverse[unit] to convert km/h INTO that unit.
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def UpperCAmelCase__(speed: float, unit_from: str, unit_to: str) -> float:
    """
    Convert `speed` from `unit_from` to `unit_to`, rounded to 3 decimals.

    Fixes: both chart dicts were bound to one shadowed name while this body
    reads `speed_chart`/`speed_chart_inverse`; the signature declared the
    same parameter name three times; the built error message was discarded.

    >>> UpperCAmelCase__(100, "km/h", "m/s")
    27.778

    Raises:
        ValueError: if either unit is not in the conversion tables.
    """
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 13 | import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : int = logging.get_logger(__name__)
class lowercase ( a ):
    """Deprecated alias kept for backward compatibility with the old feature-extractor name.

    Fixes: `__init__` declared `*args` and `**kwargs` under one duplicated
    name (a SyntaxError), and the deprecation warning lost its category
    (standard transformers deprecations use `FutureWarning`).
    """

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 403 | 0 |
import os
def solution() -> int:
    """Project Euler 22: total of all name scores in p022_names.txt.

    A name score is (1-based position in the sorted list) * (sum of letter
    values, A=1). Fixes: the `__main__` guard calls `solution()` so the def
    must carry that name; the file path used an undefined name instead of
    `__file__`; `ord` was applied to an undefined name instead of `letter`;
    and the names/score locals were all bound to one placeholder.
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace("\"", "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        # Reset the per-name accumulator before scoring the next name.
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for the `transformers-cli` tool: parse the sub-command and run it.

    Fixes: the `__main__` guard calls `main()` so the def must carry that
    name; the parser/sub-parser/args/service locals were all corrupted to
    undefined placeholder names.
    """
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Fixes: logger, MAPPING and TOP_LEVEL_KEYS were all bound to one shadowed
# name, while the conversion functions below read them by these names.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF module path ("*" = layer index placeholder).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Targets that live at the top level of the HF model (no "wav2vec2_conformer." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the attribute path `key` of the HF model, after a shape check.

    Fixes: the def name did not match its call site (`set_recursively`), the
    signature declared one duplicated parameter name five times, and the
    pointer/shape locals were corrupted.
    """
    # Walk down the dotted attribute path to the target submodule/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Map every tensor from the fairseq state dict onto the HF model.

    Unmatched keys are collected and logged. Fixes: the def name did not match
    its call site, the signature had duplicated parameter names, and the loop
    locals (`is_used`, `mapped_key`, `weight_type`, ...) were corrupted.
    NOTE(review): `is_headless` is accepted for call-site compatibility but
    not read here — confirm against upstream.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched fragment.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq conv-feature-extractor tensor into the HF feature extractor.

    type_id 0 = conv weight/bias; type_id 2 = layer norm (or group norm on
    layer 0). Fixes: the def name did not match its call site, the signature
    had duplicated parameter names, and the parsed-name locals were corrupted.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Convert a fairseq wav2vec2-conformer checkpoint into the HF format and save it.

    Fixes: the def name did not match its `__main__` call site, the signature
    declared one duplicated parameter name five times, and every local
    (config, target_dict, tokenizer, model, ...) was corrupted to one name.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # NOTE(review): the corrupted source lost the assignment target; upstream
        # sets the position-embedding type here — confirm the attribute name.
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fixes: parser/args were bound to one shadowed placeholder name while the
    # add_argument/parse_args calls below read `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 84 |
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> bool:
"""simple docstring"""
__lowerCamelCase = 0
for ch in input_str:
__lowerCamelCase = ord(UpperCamelCase__ )
__lowerCamelCase = pow(2 , UpperCamelCase__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 469 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in any missing masks for an M2M100 forward pass and bundle the inputs.

    Fixes: the def name did not match its call site in
    `prepare_config_and_inputs`, and the signature declared one duplicated
    parameter name eight times (a SyntaxError).
    """
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the source reuses the encoder attention_mask here (not
        # decoder_attention_mask); kept as-is — confirm against upstream.
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=9_9 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=2_0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = parent
UpperCamelCase__ : Union[str, Any] = batch_size
UpperCamelCase__ : int = seq_length
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : Optional[int] = use_labels
UpperCamelCase__ : List[str] = vocab_size
UpperCamelCase__ : Dict = hidden_size
UpperCamelCase__ : Tuple = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : Optional[Any] = intermediate_size
UpperCamelCase__ : Tuple = hidden_act
UpperCamelCase__ : List[str] = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : str = encoder_layerdrop
UpperCamelCase__ : Optional[int] = decoder_layerdrop
UpperCamelCase__ : Any = max_position_embeddings
UpperCamelCase__ : Optional[Any] = eos_token_id
UpperCamelCase__ : int = pad_token_id
UpperCamelCase__ : List[str] = bos_token_id
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : Optional[Any] = self.eos_token_id # Eos Token
UpperCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase__ : List[Any] = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ : List[Any] = self.get_config()
UpperCamelCase__ : List[str] = prepare_mam_aaa_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : str = self.prepare_config_and_inputs()
return config, inputs_dict
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
        """Check that decoder outputs with cached past_key_values match a full forward pass.

        NOTE(review): an automated rename collapsed distinct identifiers here --
        the two parameters share one name (a SyntaxError) and every
        ``UpperCamelCase__`` assignment shadows the previous one, while later
        lines read the originals (``outputs``, ``input_ids``, ``attention_mask``,
        ``next_tokens`` ...). The original (config, inputs_dict) signature and
        distinct locals must be restored before this can run.
        """
        UpperCamelCase__ : List[str] = MaMaaaModel(config=__SCREAMING_SNAKE_CASE ).get_decoder().to(__SCREAMING_SNAKE_CASE ).eval()
        UpperCamelCase__ : Any = inputs_dict['''input_ids''']
        UpperCamelCase__ : int = inputs_dict['''attention_mask''']
        UpperCamelCase__ : Union[str, Any] = inputs_dict['''head_mask''']
        # first forward pass
        UpperCamelCase__ : List[str] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ ,UpperCamelCase__ : Dict = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCamelCase__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        UpperCamelCase__ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCamelCase__ : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        # run the extended sequence without cache, then only the new tokens with cache
        UpperCamelCase__ : List[str] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )['''last_hidden_state''']
        UpperCamelCase__ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )[
            '''last_hidden_state'''
        ]
        # select random slice
        UpperCamelCase__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCamelCase__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCamelCase__ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-2 ) )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any:
        """Encoder and decoder saved/reloaded standalone must reproduce the full model's outputs.

        NOTE(review): duplicate parameter names are a SyntaxError and the body
        reads locals (``model``, ``outputs``, ``encoder``, ``decoder``,
        ``inputs_dict`` ...) that the collapsed ``UpperCamelCase__`` assignments
        no longer bind -- the original names must be restored to run.
        """
        UpperCamelCase__ : List[Any] = MaMaaaModel(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval()
        UpperCamelCase__ : Tuple = model(**__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[int] = outputs.encoder_last_hidden_state
        UpperCamelCase__ : List[str] = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase__ : Dict = model.get_encoder()
            encoder.save_pretrained(__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : Dict = MaMaaaEncoder.from_pretrained(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Any = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase__ : str = model.get_decoder()
            decoder.save_pretrained(__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : Tuple = MaMaaaDecoder.from_pretrained(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Dict = decoder(
            input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Common ModelTester-driven tests for the M2M100 (MaMaaa) models.

    NOTE(review): an automated rename collapsed distinct identifiers -- the
    ``SCREAMING_SNAKE_CASE_`` class attributes below all had distinct names
    (presumably all_model_classes, all_generative_model_classes,
    pipeline_model_mapping and the test-mixin flags; confirm against the
    mixins), and several methods declare duplicate ``__SCREAMING_SNAKE_CASE``
    parameters, which is a SyntaxError.
    """
    SCREAMING_SNAKE_CASE_ = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE_ = (
        {
            '''conversational''': MaMaaaForConditionalGeneration,
            '''feature-extraction''': MaMaaaModel,
            '''summarization''': MaMaaaForConditionalGeneration,
            '''text2text-generation''': MaMaaaForConditionalGeneration,
            '''translation''': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ = True
    SCREAMING_SNAKE_CASE_ = True
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
        """Skip translation pipeline tests (tokenizer requires src/tgt lang)."""
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Create the model tester and config tester fixtures."""
        UpperCamelCase__ : List[Any] = MaMaaaModelTester(self )
        UpperCamelCase__ : List[str] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        """Round-trip each model class through save_pretrained/from_pretrained."""
        UpperCamelCase__ ,UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            UpperCamelCase__ : Optional[int] = model_class(__SCREAMING_SNAKE_CASE )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__SCREAMING_SNAKE_CASE )
                UpperCamelCase__ ,UpperCamelCase__ : Dict = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , output_loading_info=__SCREAMING_SNAKE_CASE )
            self.assertEqual(info['''missing_keys'''] , [] )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        """Delegate the decoder past-key-values equivalence check to the tester."""
        UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Delegate the standalone encoder/decoder save-load check to the tester."""
        UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        """Models should accept inputs_embeds in place of input_ids."""
        UpperCamelCase__ ,UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            UpperCamelCase__ : str = model_class(__SCREAMING_SNAKE_CASE )
            model.to(__SCREAMING_SNAKE_CASE )
            model.eval()
            UpperCamelCase__ : Optional[int] = copy.deepcopy(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
            if not self.is_encoder_decoder:
                UpperCamelCase__ : Tuple = inputs['''input_ids''']
                del inputs["input_ids"]
            else:
                UpperCamelCase__ : str = inputs['''input_ids''']
                UpperCamelCase__ : List[Any] = inputs.get('''decoder_input_ids''' , __SCREAMING_SNAKE_CASE )
                del inputs["input_ids"]
                inputs.pop('''decoder_input_ids''' , __SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : str = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                UpperCamelCase__ : str = wte(__SCREAMING_SNAKE_CASE )
            else:
                UpperCamelCase__ : Union[str, Any] = wte(__SCREAMING_SNAKE_CASE )
                UpperCamelCase__ : Dict = wte(__SCREAMING_SNAKE_CASE )
            with torch.no_grad():
                model(**__SCREAMING_SNAKE_CASE )[0]
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        """Generation should run under fp16 when a CUDA device is available."""
        UpperCamelCase__ ,UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        UpperCamelCase__ : Dict = input_dict['''input_ids''']
        UpperCamelCase__ : Optional[Any] = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : List[str] = MaMaaaForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval().to(__SCREAMING_SNAKE_CASE )
        if torch_device == "cuda":
            model.half()
        model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
        model.generate(num_beams=4 , do_sample=__SCREAMING_SNAKE_CASE , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , device=None ):
    """Convert a (nested) list of token ids into a ``torch.long`` tensor.

    Bug fix: the original passed the data argument itself as ``device=``
    (``torch.tensor(x, dtype=torch.long, device=x)``), which raises for any
    list input -- presumably ``torch_device`` was intended before the
    automated rename collapsed the two names. ``device`` now defaults to
    ``None`` (current default device), so existing single-argument calls
    keep working and callers may pass an explicit device.
    """
    return torch.tensor(UpperCamelCase__ , dtype=torch.long , device=device )
# Absolute tolerance used by the integration tests below when comparing logits.
lowerCamelCase =1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _lowerCamelCase ( unittest.TestCase ):
    """Slow integration tests against the facebook/m2m100_418M checkpoint.

    NOTE(review): repeated assignments to ``UpperCamelCase__`` are rename
    damage -- later lines read the original distinct locals (``model``,
    ``input_ids``, ``output``, ``tokenizer``, ``dct``, ``hypotheses_batch``,
    ``generated``, ``expected_en`` ...), which must be restored to run.
    """
    @cached_property
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        """Lazily construct and cache the 418M tokenizer."""
        return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' )
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """Base-model hidden states must match the recorded reference slice."""
        UpperCamelCase__ : Optional[Any] = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        UpperCamelCase__ : Any = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        UpperCamelCase__ : Dict = prepare_mam_aaa_inputs_dict(model.config , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        with torch.no_grad():
            UpperCamelCase__ : str = model(**__SCREAMING_SNAKE_CASE )[0]
        UpperCamelCase__ : List[Any] = torch.Size((1, 1_1, 1_0_2_4) )
        self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
        # change to expected output here
        UpperCamelCase__ : Union[str, Any] = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """LM-head logits must match the recorded reference slice."""
        UpperCamelCase__ : Dict = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(__SCREAMING_SNAKE_CASE )
        # change to intended input
        UpperCamelCase__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        UpperCamelCase__ : int = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        UpperCamelCase__ : List[str] = prepare_mam_aaa_inputs_dict(model.config , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        with torch.no_grad():
            UpperCamelCase__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )[0]
        UpperCamelCase__ : Dict = torch.Size((1, 1_1, model.config.vocab_size) )
        self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
        # change to expected output here
        UpperCamelCase__ : Union[str, Any] = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        """French-to-English beam-search generation must match reference strings."""
        UpperCamelCase__ : int = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[Any] = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
        UpperCamelCase__ : Tuple = [
            '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
            '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
            '''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
            ''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
            ''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        UpperCamelCase__ : Optional[int] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
        UpperCamelCase__ : List[Any] = model.generate(
            input_ids=dct['''input_ids'''].to(__SCREAMING_SNAKE_CASE ) , attention_mask=dct['''attention_mask'''].to(__SCREAMING_SNAKE_CASE ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , )
        UpperCamelCase__ : Tuple = [
            '''The NSA case highlights the total absence of intelligence debate''',
            '''I think there are two levels of response from the French government.''',
            '''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
            ''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
            ''' communications in France.''',
        ]
        UpperCamelCase__ : Any = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
        assert generated == expected_en
| 462 |
import torch
from transformers import AutoModel
class _lowerCamelCase ( torch.nn.Module ):
    """FSNER few-shot NER model: a BERT encoder plus start/end span scoring.

    NOTE(review): attribute and local names were collapsed by an automated
    rename. Later methods read ``self.bert``, ``self.cos``, ``self.softmax``
    and ``self.BERT`` -- the three ``__init__`` assignments below must bind
    those names (and ``bert`` vs ``BERT`` looks inconsistent; confirm against
    the original FSNER source).
    """
    def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ) -> Optional[Any]:
        """Load the pretrained encoder plus cosine-similarity and softmax layers."""
        super(__SCREAMING_SNAKE_CASE , self ).__init__()
        UpperCamelCase__ : Tuple = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Tuple = torch.nn.CosineSimilarity(3 , 1e-08 )
        UpperCamelCase__ : List[Any] = torch.nn.Softmax(dim=1 )
    def __SCREAMING_SNAKE_CASE ( self , **__SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Run the encoder and return its last hidden state."""
        return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
        """Sum token embeddings along dim 2 (the ``keepdim`` flag was renamed away)."""
        return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ) -> str:
        """Temperature-scaled softmax over cosine similarities.

        NOTE(review): the duplicate parameter names here are a SyntaxError,
        and the body reads ``T`` -- presumably the temperature parameter.
        """
        return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
        """Score start/end token positions of query spans against support sets.

        NOTE(review): the body reads ``W_supports``, ``support_sizes``, ``S``,
        ``q``, ``s``, ``start_token_masks`` etc., none of which the collapsed
        assignments bind -- original names required to run.
        """
        UpperCamelCase__ : Union[str, Any] = W_supports['''sizes'''].tolist()
        UpperCamelCase__ : Tuple = W_supports['''start_token_id'''].item()
        UpperCamelCase__ : int = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        UpperCamelCase__ : Optional[int] = self.BERT(**__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Dict = self.BERT(**__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : int = None
        UpperCamelCase__ : Dict = None
        UpperCamelCase__ : Any = W_supports['''input_ids'''] == start_token_id
        UpperCamelCase__ : Optional[int] = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
            if i == 0:
                UpperCamelCase__ : int = 0
            else:
                UpperCamelCase__ : Optional[int] = support_sizes[i - 1]
            UpperCamelCase__ : List[Any] = S[s : s + size][start_token_masks[s : s + size]]
            UpperCamelCase__ : Optional[Any] = S[s : s + size][end_token_masks[s : s + size]]
            UpperCamelCase__ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            UpperCamelCase__ : Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                UpperCamelCase__ : Union[str, Any] = torch.vstack((p_starts, p_start) )
                UpperCamelCase__ : str = torch.vstack((p_ends, p_end) )
            else:
                UpperCamelCase__ : List[Any] = p_start
                UpperCamelCase__ : List[str] = p_end
        return p_starts, p_ends
| 462 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed( _a ):
    """Affinely transformed distribution: y = loc + scale * x.

    Fixes applied: the ``__init__`` declared four parameters with one name
    (a SyntaxError; ``loc``/``scale`` survive in the body), and all three
    properties shared the name ``a__`` so only the last definition survived
    even though ``stddev`` reads ``self.variance``. The class name is
    restored to ``AffineTransformed`` -- the name this module's own code
    constructs elsewhere. NOTE(review): the base ``_a`` is presumably
    ``TransformedDistribution`` -- confirm.
    """

    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0 ):
        # Identity transform by default: scale 1.0, loc 0.0.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )

    @property
    def mean( self ):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance( self ):
        """Variance of the transformed distribution (scale enters squared)."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev( self ):
        """Standard deviation, derived from the variance property."""
        return self.variance.sqrt()
class ParameterProjection( nn.Module ):
    """Project hidden features to one tensor per distribution argument.

    Fixes applied: ``__init__`` declared three positional parameters plus
    ``**kwargs`` all with one name (a SyntaxError) -- the names are restored
    from the keyword call site elsewhere in this module
    (``in_features=..., args_dim=..., domain_map=...``); the second method
    is renamed ``forward`` so ``nn.Module.__call__`` actually invokes it.
    The class name is restored to the one used at its call site.
    """

    def __init__( self , in_features , args_dim , domain_map , **kwargs ) -> None:
        super().__init__(**kwargs )
        self.args_dim = args_dim
        # One linear head per distribution argument (e.g. loc, scale).
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map

    def forward( self , x ) -> Tuple[torch.Tensor]:
        """Apply every head, then constrain the raw outputs via the domain map."""
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer( nn.Module ):
    """Wrap a plain callable as an nn.Module so it can live in a module tree.

    Fixes applied: the second method declared its positional argument and
    ``*varargs`` with one name (a SyntaxError) and is renamed ``forward`` so
    ``nn.Module.__call__`` invokes it; the class name is restored to
    ``LambdaLayer``, the name used at its call site in this module.
    """

    def __init__( self , function ) -> None:
        super().__init__()
        self.function = function

    def forward( self , x , *args ):
        """Apply the wrapped callable to the input (plus any extra args)."""
        return self.function(x , *args )
class __a:
    """Base class mapping raw network outputs to a torch Distribution.

    Fixes applied: the original collapsed three class-level annotations to
    ``lowerCAmelCase = 42`` (last assignment wins) and named every method
    ``a__`` (again, last definition wins), while the bodies reference the
    real names. Names below are reconstructed from the surviving internal
    references (``self.args_dim``, ``self.distribution_class``,
    ``self._base_distribution``, ``self.domain_map``, ``self.event_shape``,
    ``self.event_dim``, ``cls.squareplus`` in the subclasses).
    NOTE(review): ``distribution``, ``value_in_support`` and
    ``get_parameter_projection`` have no surviving reference here -- the
    names follow the same API style; confirm against callers.
    """

    # Filled in by subclasses.
    args_dim: Dict[str, int]
    distribution_class: type
    in_features: int

    def __init__( self , dim = 1 ) -> None:
        self.dim = dim
        # Each argument head grows by the number of output dimensions.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution( self , distr_args ):
        """Instantiate the raw distribution (Independent-wrapped if dim > 1)."""
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )

    def distribution( self , distr_args , loc = None , scale = None ) -> Distribution:
        """Build the output distribution, affinely rescaled when loc/scale given.

        Fixes the duplicate-parameter SyntaxError; ``loc``/``scale`` survive
        in the ``if`` test below.
        """
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )

    @property
    def event_shape( self ) -> Tuple:
        """Shape of one event: scalar for dim == 1, else (dim,)."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim( self ) -> int:
        return len(self.event_shape )

    @property
    def value_in_support( self ) -> float:
        """A value guaranteed to lie in the distribution's support."""
        return 0.0

    def get_parameter_projection( self , in_features ) -> nn.Module:
        """Module projecting hidden features to this distribution's arguments."""
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def domain_map( self , *args ):
        """Map raw projections into each argument's valid domain; subclass hook."""
        raise NotImplementedError()

    @staticmethod
    def squareplus( x ) -> torch.Tensor:
        # Smooth monotone map onto (0, inf); used to keep scale/count positive.
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class __a( _a ):
    """Student's t distribution output (heavy-tailed real-valued data).

    Fixes applied: the two ``lowerCAmelCase`` class attributes are restored
    to ``args_dim``/``distribution_class`` (the names the base class reads),
    and ``domain_map`` declared three parameters with one name (a
    SyntaxError) -- restored in ``args_dim`` order: df, loc, scale
    (``scale.dtype`` survived in the body).
    """

    args_dim = {"df": 1, "loc": 1, "scale": 1}
    distribution_class = StudentT

    @classmethod
    def domain_map( cls , df , loc , scale ):
        # scale must be strictly positive; df is shifted above 2 so the
        # variance exists.
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __a( _a ):
    """Normal (Gaussian) distribution output.

    Fixes applied: the two ``lowerCAmelCase`` class attributes are restored
    to ``args_dim``/``distribution_class`` (the names the base class reads),
    and ``domain_map`` declared two parameters with one name (a SyntaxError)
    -- restored as loc, scale (``scale.dtype`` survived in the body).
    """

    args_dim = {"loc": 1, "scale": 1}
    distribution_class = Normal

    @classmethod
    def domain_map( cls , loc , scale ):
        # scale must be strictly positive: squareplus then clamp away from 0.
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class __a( _a ):
    """Negative-binomial distribution output (non-negative count data).

    Fixes applied: the two ``lowerCAmelCase`` class attributes are restored
    to ``args_dim``/``distribution_class`` (the names the base class reads);
    the two methods both named ``a__`` (only the last survived) are restored
    to ``_base_distribution`` / ``distribution`` matching the base-class API;
    duplicate parameter names (a SyntaxError) are split back into the
    ``total_count``/``logits``/``loc``/``scale`` names that survived in the
    bodies; and a table-row artifact fused onto the final line is removed.
    """

    args_dim = {"total_count": 1, "logits": 1}
    distribution_class = NegativeBinomial

    @classmethod
    def domain_map( cls , total_count , logits ):
        # total_count must be positive; logits are unconstrained.
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def _base_distribution( self , distr_args ) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )

    # Overrides the parent: scale is folded into the logits rather than
    # applied as an affine transform (loc is accepted for API parity but
    # unused, matching the original body).
    def distribution( self , distr_args , loc = None , scale = None ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
__a: Optional[Any] = True
from torch.cuda.amp import autocast
__a: Optional[Any] = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
    """Arguments selecting the pretrained model/config for wav2vec2 pretraining.

    NOTE(review): every field name was collapsed to ``SCREAMING_SNAKE_CASE``
    (last assignment wins) and defaults to ``a__`` look like renamed
    ``None``/``False`` sentinels; the metadata help-strings still identify
    the originals (model_name_or_path, cache_dir, freeze_feature_extractor,
    verbose_logging, max/min gumbel temperature, gumbel_temperature_decay).
    """
    SCREAMING_SNAKE_CASE = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    SCREAMING_SNAKE_CASE = field(
        default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    SCREAMING_SNAKE_CASE = field(
        default=a__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    SCREAMING_SNAKE_CASE = field(
        default=a__ , metadata={"help": "Whether to log verbose messages or not."} , )
    SCREAMING_SNAKE_CASE = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    SCREAMING_SNAKE_CASE = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    SCREAMING_SNAKE_CASE = field(
        default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
    """Configure root logging format and set this module's logger level.

    NOTE(review): the two parameters share one name (a SyntaxError) and the
    body reads ``model_args`` / ``training_args`` -- presumably the original
    parameter names; the final ``setLevel`` argument should be the computed
    level local, also lost to the rename.
    """
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    # Default to WARNING; DEBUG when verbose, INFO on the main process only.
    lowercase__ : int = logging.WARNING
    if model_args.verbose_logging:
        lowercase__ : str = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        lowercase__ : List[Any] = logging.INFO
    logger.setLevel(UpperCAmelCase )
@dataclass
class UpperCAmelCase :
    """Arguments describing the dataset and preprocessing for pretraining.

    NOTE(review): field names collapsed to ``SCREAMING_SNAKE_CASE``; the help
    strings identify the originals (dataset_name, dataset_config_name,
    train_split_name, validation_split_name, speech_file_column,
    overwrite_cache, validation_split_percentage, preprocessing_num_workers,
    max_duration_in_seconds).
    """
    SCREAMING_SNAKE_CASE = field(
        default=a__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    SCREAMING_SNAKE_CASE = field(
        default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    SCREAMING_SNAKE_CASE = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    SCREAMING_SNAKE_CASE = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    SCREAMING_SNAKE_CASE = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    SCREAMING_SNAKE_CASE = field(
        default=a__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    SCREAMING_SNAKE_CASE = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    SCREAMING_SNAKE_CASE = field(
        default=a__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
    SCREAMING_SNAKE_CASE = field(
        default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class UpperCAmelCase :
    """Collator padding wav2vec2 features and sampling time-mask indices.

    NOTE(review): the five fields were collapsed to one name (last wins);
    ``__call__`` reads ``self.model``, ``self.feature_extractor``,
    ``self.padding``, ``self.max_length``, ``self.pad_to_multiple_of``, so
    those were the original field names. Inside ``__call__`` the repeated
    ``lowercase__`` assignments likewise shadow locals that later lines read
    (``batch``, ``mask_indices_seq_length``, ``batch_size``,
    ``attention_mask``).
    """
    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = "longest"
    SCREAMING_SNAKE_CASE = None
    SCREAMING_SNAKE_CASE = None
    def __call__( self , __lowerCAmelCase ) -> Dict[str, torch.Tensor]:
        """Collate a list of features into a padded batch plus mask indices."""
        # reformat list to dict and set to pytorch format
        lowercase__ : List[str] = self.feature_extractor.pad(
            __lowerCAmelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        lowercase__ : List[str] = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
        lowercase__ : Optional[Any] = batch['''input_values'''].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            lowercase__ : str = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
                torch.long )
            lowercase__ : int = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            lowercase__ : Any = 1
            lowercase__ : Dict = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        lowercase__ : List[Any] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__lowerCAmelCase , min_masks=2 , )
        return batch
class UpperCAmelCase ( a__ ):
    """Trainer subclass that decays the gumbel-softmax temperature each step.

    NOTE(review): ``__init__`` declares several keyword parameters with one
    name (a SyntaxError); judging by the attribute reads below they were
    ``max_gumbel_temp=1``, ``min_gumbel_temp=0``, ``gumbel_temp_decay=1.0``,
    and the collapsed ``lowercase__`` assignments must bind
    ``self.num_update_step`` and those three attributes.
    """
    def __init__( self , *__lowerCAmelCase , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=1.0 , **__lowerCAmelCase ) -> int:
        super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
        lowercase__ : Union[str, Any] = 0
        lowercase__ : List[str] = max_gumbel_temp
        lowercase__ : Union[str, Any] = min_gumbel_temp
        lowercase__ : List[Any] = gumbel_temp_decay
    def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> torch.Tensor:
        """One training step: forward, scale loss, backward, decay the temperature."""
        model.train()
        lowercase__ : List[Any] = self._prepare_inputs(__lowerCAmelCase )
        if self.use_amp:
            with autocast():
                lowercase__ : List[Any] = self.compute_loss(__lowerCAmelCase , __lowerCAmelCase )
        else:
            lowercase__ : Optional[Any] = self.compute_loss(__lowerCAmelCase , __lowerCAmelCase )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                lowercase__ : str = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                lowercase__ : Union[str, Any] = loss.sum() / (inputs['''mask_time_indices''']).sum()
            else:
                raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            lowercase__ : str = loss / self.args.gradient_accumulation_steps
        # Backward via whichever mixed-precision/deepspeed path is active.
        if self.use_amp:
            self.scaler.scale(__lowerCAmelCase ).backward()
        elif self.use_apex:
            with amp.scale_loss(__lowerCAmelCase , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(__lowerCAmelCase )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        return loss.detach()
def __UpperCamelCase ( ):
    """Script entry point: parse args, load/normalize datasets, run pretraining.

    NOTE(review): the collapsed ``lowercase__`` assignments shadow locals the
    later lines read (``parser``, ``model_args``/``data_args``/``training_args``,
    ``datasets``, ``feature_extractor``, ``vectorized_datasets``, ``config``,
    ``model``, ``trainer`` ...) -- the original names must be restored to run.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowercase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    lowercase__ , lowercase__ , lowercase__ : Dict = parser.parse_args_into_dataclasses()
    configure_logger(UpperCAmelCase , UpperCAmelCase )
    # Downloading and loading a dataset from the hub.
    lowercase__ : Union[str, Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        lowercase__ : Dict = DatasetDict()
        lowercase__ : Optional[Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
        lowercase__ : Optional[Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        lowercase__ : Any = DatasetDict()
        lowercase__ : Optional[int] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
        lowercase__ : str = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    lowercase__ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=UpperCAmelCase )
    def prepare_dataset(UpperCAmelCase ):
        # check that all files have the correct sampling rate
        lowercase__ , lowercase__ : int = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    lowercase__ : int = datasets.map(
        UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
    # filter audio files that are too long
    lowercase__ : List[Any] = vectorized_datasets.filter(
        lambda UpperCAmelCase : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(UpperCAmelCase ):
        return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    lowercase__ : Tuple = vectorized_datasets.map(
        UpperCAmelCase , batched=UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    lowercase__ : Union[str, Any] = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
            ''' ``config.feat_extract_norm=\'layer\'''' )
    lowercase__ : int = WavaVecaForPreTraining(UpperCAmelCase )
    lowercase__ : List[Any] = DataCollatorForWavaVecaPretraining(model=UpperCAmelCase , feature_extractor=UpperCAmelCase )
    lowercase__ : Any = WavaVecaPreTrainer(
        model=UpperCAmelCase , data_collator=UpperCAmelCase , args=UpperCAmelCase , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=UpperCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined in this module -- presumably the
    # renamed ``__UpperCamelCase`` above; confirm before running as a script.
    main()
| 152 | 0 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
# RFC 3526 "More Modular Exponential (MODP) Diffie-Hellman groups", keyed by
# group id.  Each entry carries the hexadecimal prime modulus (parsed into an
# int) and the generator to use with it.  The `+`-concatenated string literals
# below simply split each very long hex constant across lines.
# (Annotation changed from `Dict` to the builtin `dict`: `typing.Dict` is not
# imported in this module, and the annotation is evaluated at import time.)
lowerCamelCase__ : dict = {
    # 1536-bit
    5: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 2048-bit
    14: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 3072-bit
    15: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 4096-bit
    16: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
            + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
            + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
            + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
            + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
            + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
            + "FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 6144-bit
    17: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
            + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
            + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
            + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
            + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
            + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
            + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
            + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
            + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
            + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
            + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
            + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
            + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
            + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
            + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
            + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
            + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
            + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
            + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
            + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
            + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
            + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
            + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
            + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
            + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
            + "6DCC4024FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 8192-bit
    18: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
            + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
            + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
            + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
            + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
            + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
            + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
            + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
            + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
            + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
            + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
            + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
            + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
            + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
            + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
            + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
            + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
            + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
            + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
            + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
            + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
            + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
            + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
            + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
            + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
            + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
            + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
            + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
}
# Bind the MODP group table (defined above under an auto-generated name) to a
# readable name.  ``globals().get`` keeps this snippet importable even when the
# table is absent (e.g. when the class is exercised in isolation).
primes = globals().get("lowerCamelCase__", {})


class DiffieHellman:
    """Diffie-Hellman key exchange over the RFC 3526 MODP groups defined above.

    Note: the previous (auto-mangled) version of this class was not valid
    Python — every method shared one mangled name so later definitions
    shadowed earlier ones, and both static methods declared duplicate
    parameter names (a SyntaxError).  This rewrite restores the conventional
    structure that the visible bodies imply.
    """

    # Current minimum recommendation is a 2048-bit modulus (group 14).
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        # 32 random bytes -> 256-bit private exponent.
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        """Return the private key as a hex string without the ``0x`` prefix."""
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        """Return ``generator ** private_key mod prime`` as a bare hex string."""
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        """Validate a peer key: in range [2, p-2] and of order dividing (p-1)/2."""
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        """Derive the shared secret from the peer's hex public key.

        Raises:
            ValueError: if the peer key fails validation.
        """
        # Local import: the module-level ``from hashlib import shaaaa`` above is
        # unusable (hashlib exposes no such name); sha256 is the real digest.
        from hashlib import sha256

        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        """Static variant of :meth:`is_valid_public_key` for an explicit modulus."""
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        """Derive the shared secret from hex-encoded keys without an instance."""
        from hashlib import sha256  # see note in generate_shared_key

        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


# Backward-compatible alias for the auto-generated class name.
lowercase__ = DiffieHellman


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of ``magnitude`` at ``angle`` into components [Fx, Fy].

    ``angle`` is interpreted in degrees unless ``radian_mode`` is True.
    (The previous mangled signature declared the same parameter name three
    times — a SyntaxError — and the function lost the name ``polar_force``
    that the demo code below calls.)
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


# Backward-compatible alias for the auto-generated name.
__A = polar_force
def in_static_equilibrium(
    forces: NDArray[floataa], location: NDArray[floataa], eps: float = 10**-1
) -> bool:
    """Return True when the net moment of ``forces`` applied at ``location``
    is (numerically) zero, i.e. the system is in rotational static equilibrium.

    Both inputs are arrays of 2-D vectors of matching length; ``eps`` is the
    tolerance on the summed moment.  (The previous mangled signature declared
    the same parameter name three times — a SyntaxError.)
    """
    # Moment of each force about the origin: cross(position, force).
    moments: NDArray[floataa] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


# Backward-compatible alias for the auto-generated name.
__A = in_static_equilibrium
if __name__ == "__main__":
    # Test to check if it works
    # NOTE(review): the mangled assignments below bind `lowerCamelCase__`, yet
    # the asserts read `forces` / `location`, which are never defined — this
    # demo would raise NameError if run.  Code left unchanged here.
    lowerCamelCase__ : Optional[Any] = array(
        [
            polar_force(7_1_8.4, 180 - 30),
            polar_force(8_7_9.5_4, 45),
            polar_force(100, -90),
        ]
    )
    lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    lowerCamelCase__ : Union[str, Any] = array(
        [
            polar_force(30 * 9.8_1, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest

    doctest.testmod()
| 18 | 1 |
def simplify(current_set: list[list]) -> list[list]:
    """Recursively reduce an augmented matrix towards row-echelon 'unit' form.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Deep-copy the rows so the caller's equations are never mutated
    # (the previous shallow copy shared — and clobbered — the row lists).
    working_set = [row[:] for row in current_set]
    # Divide each row by the magnitude of its first term --> creates a "unit" matrix.
    for row_index, row in enumerate(working_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                working_set[row_index][column_index] = column
                continue
            working_set[row_index][column_index] = column / magnitude
    # Subtract the first row from every later row to cancel the leading term.
    first_row = working_set[0]
    final_set = [first_row]
    for row in working_set[1:]:
        # If the leading term is 0, the row is already in the form we want.
        if row[0] == 0:
            final_set.append(row)
            continue
        temp_row = []
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Recurse on the trailing sub-matrix until rows are 3 wide
    # (two unknowns plus the constant column).
    if len(final_set[0]) != 3:
        head_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1:]:
            current_first_column.append(row[0])
            next_iteration.append(row[1:])
        resultant = simplify(next_iteration)
        # Re-attach the stripped leading column and the head row.
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, head_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations, each given as n+1 coefficients
    (the last entry of every row is the constant term).

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[0, -3, 1, 7], [3, 2, -1, 11], [5, 1, -2, 12]])
    [6.4, 1.2, 10.6]
    >>> solve_simultaneous([[4, 2]])
    [0.5]

    Raises:
        IndexError: on an empty system or rows of the wrong length.
        ValueError: on non-numeric coefficients, or when no equation is free
            of zero coefficients (needed as the pivot row).
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        # Single equation a*x = b.
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        # Move a fully non-zero equation to the front to act as the pivot row.
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # Back-substitute from the most-reduced row upwards.
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1]
        # Guard against an all-zero coefficient row (the unguarded pop
        # previously raised IndexError on singular systems).
        while temp_row and temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1:]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


# Backward-compatible alias for the auto-generated name (the last binding wins,
# matching the original module layout).
__snake_case = solve_simultaneous
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo system of 5 equations in 5 unknowns.  (The previous version bound
    # the list to a mangled name while printing ``solve_simultaneous(eq)``,
    # so ``eq`` was undefined, and its ``Union[str, Any]`` annotation used an
    # unimported name.)
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 441 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n < 3.32e24.

    Uses fixed witness sets whose deterministic bounds come from numerical
    analysis (OEIS A014233).  Above the last bound the test is only
    probabilistic: pass ``allow_probable=True`` to accept that; a return value
    of True then means *probable* prime, while False is always composite.

    Raises:
        ValueError: if ``n`` exceeds the deterministic bound and
            ``allow_probable`` is False.

    (The previous mangled signature declared the same parameter name twice —
    a SyntaxError — and lost every local binding.)
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis; the literal 1 entries are placeholders
    # that can never match (every n here is >= 2), they only keep the witness
    # list aligned with its bound.
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    # Fallback for the probabilistic path (n above every bound): use all
    # witnesses.  Previously ``plist`` was unbound in that case -> NameError.
    plist = primes
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    """Exercise miller_rabin with composite/prime pairs around each bound."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


# Backward-compatible alias for the auto-generated name (the last binding wins,
# matching the original module layout).
__snake_case = test_miller_rabin
# Run the self-checks when executed directly.
if __name__ == "__main__":
    test_miller_rabin()
| 441 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # Query comes from the CLI arguments, else from an interactive prompt.
    # (The previous mangled version assigned every value to ``__a`` while the
    # following statements read ``query``/``url``/``res``/``link`` — all
    # undefined; the real names are restored here.)
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    # Randomized User-Agent to look like a regular browser request.
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        # Normal result layout: first organic hit lives in a div.yuRUbf.
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout (div.kCrYT): the target URL is wrapped in a
        # redirect link whose query string carries the real address.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 689 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 1 |
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence for the observations
    (Viterbi algorithm for a hidden Markov model).

    Args:
        observations_space: ordered list of observation names (strings).
        states_space: list of hidden state names (strings).
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> {state -> P(transition)}.
        emission_probabilities: state -> {observation -> P(emission)}.

    Raises:
        ValueError: if any argument is empty or of the wrong shape/type.

    (The previous mangled version declared every parameter with the same name
    — a SyntaxError — and lost all assignment targets; conventional names are
    restored here.)
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all viterbi() arguments, raising ValueError on bad input."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Reject any empty argument."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Both observation and state spaces must be lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Require ``_object`` to be a list of strings."""
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate the three probability tables."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Require a dict of dicts whose inner values are floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    """Require a dict with string keys and ``value_type`` values."""
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


# Backward-compatible alias for the auto-generated name.
lowerCamelCase__ = viterbi
# Run the module doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 517 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    # CLI wrapper around diffusers' original-checkpoint converter.
    # (The previous mangled version assigned the parser/parsed args/pipeline
    # to ``SCREAMING_SNAKE_CASE_`` while the code below read ``parser``,
    # ``args`` and ``pipe`` — all undefined; it also referenced the
    # non-existent ``torch.floataa`` for half precision.)
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        # ``--half`` saves the weights in half precision (fp16).
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 517 | 1 |
'''simple docstring'''
def sylvester(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence (2, 3, 7, 43, ...).

    Each term is a(n) = a(n-1)**2 - a(n-1) + 1, with a(1) = 2.
    (The previous mangled version named the parameter ``snake_case`` while the
    body read ``number``, and recursed via the undefined name ``sylvester``.)

    Raises:
        AssertionError: if ``number`` is not an int (kept as an assert to
            preserve the original failure mode).
        ValueError: if ``number`` < 1.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


# Backward-compatible alias for the auto-generated name.
A_ = sylvester
# Print a sample term when run as a script.
if __name__ == "__main__":
    print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 465 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: list) -> str:
    """Return the sha256 hex digest of the given source lines, ignoring
    ``#`` comments and blank lines.

    (The previous mangled version named the parameter ``snake_case`` while the
    body iterated ``lines`` and collected into unbound names.)
    """
    # Local import: the module-level ``from hashlib import shaaaa`` above is
    # unusable (hashlib exposes no such name); sha256 is the real digest.
    from hashlib import sha256

    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# Backward-compatible alias for the auto-generated name.
A_ = _hash_python_lines
# get importable module names and hash for caching
# Importable module names with a hash of their source for cache invalidation.
# (The previous mangled version bound all four structures to the single name
# ``A_`` while later statements read ``_EXTENSION_TO_MODULE`` and
# ``_MODULE_TO_EXTENSIONS`` — undefined; the real names are restored here.)
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions.
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

# Modules whose folder layout supports an accompanying metadata file.
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name.
_MODULE_TO_EXTENSIONS: dict = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 465 | 1 |
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_A = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_A = ''
_A = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(_SCREAMING_SNAKE_CASE ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_A, _A = 0, 0
# length[i] shows the length of palindromic substring with center i
_A = [1 for i in range(len(_SCREAMING_SNAKE_CASE ) )]
# for each character in new_string find corresponding palindromic string
_A = 0
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
_A = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_SCREAMING_SNAKE_CASE )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_A = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_A = j - k + 1 # noqa: E741
_A = j + k - 1
# update max_length and start position
if max_length < length[j]:
_A = length[j]
_A = j
# create that string
_A = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
# Run this module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 27 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# NOTE(review): both module-level names below were mangled to ``UpperCAmelCase__``;
# upstream they are ``logger`` and ``GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP``.  The
# second assignment shadows the first, so the logger binding is lost in this copy.
UpperCAmelCase__ =logging.get_logger(__name__)
UpperCAmelCase__ ={
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowerCamelCase__ ( _a ):
    """Configuration class for GPT-J models.

    Stores the hyper-parameters that define a GPT-J architecture (vocabulary
    size, number of layers/heads, rotary-embedding dimension, dropout rates,
    ...) and forwards generation-related settings to the base configuration.

    NOTE(review): restored from the upstream GPT-J configuration.  In the
    checked-in copy every ``__init__`` parameter was named ``A_`` (a
    SyntaxError) and both class attributes were named ``a``, which destroyed
    ``model_type`` and ``attribute_map``.
    """

    model_type = "gptj"
    # Map the generic PretrainedConfig attribute names onto GPT-J's field names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """Build the configuration; unknown keyword arguments go to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class lowerCamelCase__ ( _a ):
    """ONNX export configuration for GPT-J (with optional past-key-values support).

    NOTE(review): restored from the upstream GPT-J ONNX config.  The checked-in
    copy had duplicate ``A_`` parameter names (a SyntaxError), all properties
    sharing one mangled name (breaking the ``self.num_layers`` /
    ``self.num_attention_heads`` lookups used below), and ``super(A_, self)``.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs for ONNX export."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Produce dummy inputs for tracing, optionally with past key/values."""
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the attention mask to cover the (batch, past_len) past slots.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 616 | 0 |
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
    """Raised when a queue exceeds its maximum capacity (upstream: ``OverFlowError``)."""

    pass


# NOTE(review): the raise sites in this module use the upstream name, which the
# mangling destroyed; restore it as an alias.
OverFlowError = SCREAMING_SNAKE_CASE_
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
    """Raised when dequeuing from an empty queue (upstream: ``UnderFlowError``)."""

    pass


# NOTE(review): the raise sites in this module use the upstream name, which the
# mangling destroyed; restore it as an alias.
UnderFlowError = SCREAMING_SNAKE_CASE_
class SCREAMING_SNAKE_CASE_:
    """Priority queue with three fixed priority levels (0 = highest, 2 = lowest).

    Each level is a FIFO list capped at 100 elements; ``dequeue`` always serves
    the oldest element of the highest non-empty level.

    NOTE(review): the checked-in copy had both methods named ``__A`` (the second
    shadowed the first) and a duplicate ``(self, A, A)`` signature, which is a
    SyntaxError; the method and parameter names are restored from the call
    sites in this module.
    """

    def __init__(self) -> None:
        # One FIFO list per priority level 0..2.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        """Append ``data`` to the queue for ``priority`` (must be 0, 1 or 2)."""
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Return the oldest element of the highest non-empty priority level."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


# Restore the upstream class name used by the demo function below.
FixedPriorityQueue = SCREAMING_SNAKE_CASE_
class SCREAMING_SNAKE_CASE_:
    """Priority queue in which an element's own value is its priority.

    ``dequeue`` removes and returns the smallest stored value; capacity is
    capped at 100 elements.

    NOTE(review): the checked-in copy had both methods named ``__A`` (the
    second shadowed the first); the names are restored from the call sites in
    this module.
    """

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        """Append ``data``; raise when the 100-element capacity is reached."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        """Remove and return the smallest element currently stored."""
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


# Restore the upstream class name used by the demo function below.
ElementPriorityQueue = SCREAMING_SNAKE_CASE_
def _SCREAMING_SNAKE_CASE():
    """Smoke-test FixedPriorityQueue: fill all three levels, then print while draining.

    NOTE(review): ``print(a__)`` in the checked-in copy referenced an undefined
    name; the local queue ``fpq`` is clearly what was meant.
    """
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


# Restore the name called from the ``__main__`` guard below.
fixed_priority_queue = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE():
    """Smoke-test ElementPriorityQueue: enqueue nine values, then print while draining.

    NOTE(review): ``print(a__)`` in the checked-in copy referenced an undefined
    name; the local queue ``epq`` is clearly what was meant.
    """
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


# Restore the name called from the ``__main__`` guard below.
element_priority_queue = _SCREAMING_SNAKE_CASE
# Run both queue demos when executed as a script.  (A stray data-marker fused
# into the last line of the checked-in copy has been removed.)
if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
def _SCREAMING_SNAKE_CASE( snake_case_ : str ):
    """Return ``snake_case_`` with every word longer than four characters reversed.

    Words are split on whitespace and re-joined with single spaces; words of
    four characters or fewer are kept unchanged.
    """
    transformed = (word[::-1] if len(word) > 4 else word for word in snake_case_.split())
    return " ".join(transformed)


# Restore the name called from the ``__main__`` guard below.
reverse_long_words = _SCREAMING_SNAKE_CASE
# Run the doctests and a demo conversion when executed as a script.  (A stray
# data-marker fused into the last line of the checked-in copy has been removed.)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
'''simple docstring'''
import inspect
import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Tests for ``FlaxAutoModel``: hub loading, jit-ability and error messages.

    NOTE(review): restored from the upstream Flax auto-model tests.  The
    checked-in copy used ``_UpperCAmelCase`` (undefined) for nearly every
    argument and gave every method the same mangled name, so only the last
    method survived and none were discoverable by unittest.  A stray data
    marker fused into the final line has been removed.
    """

    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            _ = FlaxAutoModel.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack', ):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_pt=True` to load this model'):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowercase :
    """Builds small Bit configs/inputs and shared checks for the test suite below.

    NOTE(review): restored from the upstream Bit model tests.  The checked-in
    copy had every ``__init__`` parameter named ``__lowerCamelCase`` (a
    SyntaxError) and every method named ``__a``, while the test class below
    calls ``prepare_config_and_inputs`` etc. by their real names.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Bit downsamples by a factor of 32 overall.
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard model-tester suite for Bit (model, backbone, classification head).

    NOTE(review): restored from the upstream Bit model tests.  The checked-in
    copy had the mixin bases mangled to the undefined name ``_A``, every class
    attribute named ``lowercase`` (so only the last survived) and every method
    named ``__a`` (so unittest discovered nothing); ``nn.BatchNormad`` was a
    mangling of ``nn.BatchNorm2d``.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = __lowercase.model_tester_class(self) if hasattr(__lowercase, "model_tester_class") else BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='''Bit does not output attentions''')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f'Parameter {name} of model {model_class} seems not properly initialized', )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason='''Bit does not use feedforward chunking''')
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def __UpperCAmelCase() -> "Image.Image":
    """Load the standard COCO fixture image used by the slow integration tests.

    NOTE(review): the checked-in copy assigned the image to ``lowercase`` but
    returned the undefined name ``image`` (NameError), and its ``-> int``
    return annotation was wrong.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# Restore the name called by the integration test below.
prepare_img = __UpperCAmelCase
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
    """Slow end-to-end inference check for the pretrained Bit classifier.

    NOTE(review): restored from the upstream Bit tests — both methods of the
    checked-in copy were named ``__a``, so the ``default_image_processor``
    property the test reads was shadowed away, and ``torch_device`` arguments
    were mangled to ``__lowerCamelCase``.
    """

    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@require_torch
class __lowercase ( BackboneTesterMixin , unittest.TestCase ):
    """Backbone-specific test suite for Bit.

    NOTE(review): restored from the upstream Bit tests — the mixin base was
    mangled to the undefined name ``_A`` and the class attributes
    (``all_model_classes``, ``config_class``, ``has_attentions``) all shared
    the name ``lowercase``, so only the last survived.
    """

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 604 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for GPTSan-japanese.

    NOTE(review): restored from the upstream GPTSan tokenizer tests.  In the
    checked-in copy the mixin base was mangled to ``__snake_case`` (undefined),
    the three class attributes shared one name, and every method was named
    ``UpperCamelCase`` — so ``setUp``/``get_tokenizer`` never existed and no
    test was discoverable.
    """

    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # 㔺 is normalised to 世 by the tokenizer's cleaning step.
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization: a <|bagoftoken|> expands to repeated tokens.
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization: the three prefix/input splits decode identically.
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode('', prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        decoded_1 = tokenizer.decode(x_token_1)
        decoded_2 = tokenizer.decode(x_token_2)
        decoded_3 = tokenizer.decode(x_token_3)
        self.assertEqual(decoded_1, expected_text)
        self.assertEqual(decoded_2, expected_text)
        self.assertEqual(decoded_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer('', prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        x_token_1 = tokenizer.encode('あンいワ')
        x_token_2 = tokenizer.encode('', prefix_text='あンいワ')
        x_token_3 = tokenizer.encode('いワ', prefix_text='あン')
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 137 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( principal : float , daily_interest_rate : float , days_between_payments : float ) -> float:
    """Return the simple interest accrued on ``principal``.

    ``daily_interest_rate`` is a per-day fraction (e.g. 0.01 == 1% per day).

    Raises:
        ValueError: if ``days_between_payments`` or ``principal`` is not
            positive, or ``daily_interest_rate`` is negative.

    NOTE(review): the checked-in copy named all three parameters
    ``_lowerCamelCase`` (a SyntaxError); names are restored from the
    validation messages in the body.
    """
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0')
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * daily_interest_rate * days_between_payments
def lowerCamelCase__ ( principal : float , nominal_annual_interest_rate_percentage : float , number_of_compounding_periods : float , ) -> float:
    """Return the compound interest earned on ``principal``.

    The rate is applied once per compounding period; despite its name, it is a
    fraction per period (0.05 == 5%), not a percentage.

    Raises:
        ValueError: if the period count or principal is not positive, or the
            rate is negative.

    NOTE(review): the checked-in copy named all three parameters
    ``_lowerCamelCase`` (a SyntaxError); names restored from the body.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    # Interest only (the grown balance minus the original principal).
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def lowerCamelCase__ ( principal : float , nominal_annual_percentage_rate : float , number_of_years : float , ) -> float:
    """Return interest from daily compounding of an annual rate over whole years.

    Equivalent to compound interest at ``nominal_annual_percentage_rate / 365``
    per day over ``number_of_years * 365`` periods.

    Raises:
        ValueError: if years or principal is not positive, or the rate is
            negative.

    NOTE(review): the checked-in copy named all three parameters
    ``_lowerCamelCase`` (a SyntaxError) and called ``compound_interest``, a
    name that no longer exists in this module because every function here was
    renamed to ``lowerCamelCase__``; the equivalent computation (including its
    validations, already guaranteed by the checks below) is inlined instead.
    """
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    daily_rate = nominal_annual_percentage_rate / 365
    number_of_days = number_of_years * 365
    return principal * ((1 + daily_rate) ** number_of_days - 1)
# Run this module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 137 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCAmelCase ( coefficient_matrix , constant_matrix , init_val , iterations ):
    """Solve a strictly diagonally dominant system A·x = b with Jacobi iteration.

    Args:
        coefficient_matrix: n x n numpy array A.
        constant_matrix: n x 1 numpy array b.
        init_val: list of n initial guesses for x.
        iterations: number of Jacobi sweeps to perform (>= 1).

    Returns:
        List of n floats: the approximation of x after ``iterations`` sweeps.

    Raises:
        ValueError: on any dimension mismatch, non-positive iteration count, or
            a coefficient matrix that is not strictly diagonally dominant.
    """
    # BUG FIX: the original declared all four parameters with one name (a
    # SyntaxError), assigned `.shape` tuples without unpacking, and referenced
    # a dominance-check helper whose module-level name was clobbered; the check
    # is inlined below so this function stands on its own.
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape
    if rowsa != colsa:
        raise ValueError(f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}""" )
    if colsb != 1:
        raise ValueError(f"""Constant matrix must be nx1 but received {rowsb}x{colsb}""" )
    if rowsa != rowsb:
        raise ValueError(
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rowsa}x{colsa} and {rowsb}x{colsb}""" )
    if len(init_val ) != rowsa:
        raise ValueError(
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(init_val )} and {rowsa}""" )
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''' )
    # Augmented table [A | b]; the last column holds the constants.
    table = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    # Strict diagonal dominance guarantees Jacobi convergence.
    for i in range(rows ):
        total = 0
        for j in range(cols - 1 ):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]        # diagonal entry
                elif col == cols - 1:
                    val = table[row][col]          # constant term b[row]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def lowerCAmelCase ( UpperCamelCase__ ):
    """Verify the augmented table [A | b] is strictly diagonally dominant.

    The last column (the constants) is excluded from the off-diagonal sum.

    Returns:
        True when every |A[i][i]| strictly exceeds the sum of that row's
        off-diagonal coefficients.

    Raises:
        ValueError: if any row violates strict diagonal dominance.
    """
    # BUG FIX: the body read `table`, `rows` and `cols`, none of which were
    # bound — the parameter and the unpacked `.shape` tuple are restored.
    rows, cols = UpperCamelCase__.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            total += UpperCamelCase__[i][j]
        if UpperCamelCase__[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262 |
from jiwer import compute_measures
import datasets
# Citation, description, and usage docstrings consumed by the WER metric class
# below (via `add_start_docstrings` and `datasets.MetricInfo`).
# BibTeX citation for the MER/WIL paper the metric references.
SCREAMING_SNAKE_CASE__ : List[str] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
# Long-form description of the word-error-rate metric and its formula.
SCREAMING_SNAKE_CASE__ : Dict = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
# Keyword-arguments documentation and usage example shown to metric users.
# NOTE(review): all three constants share the name SCREAMING_SNAKE_CASE__, so
# only the last binding survives; the class below references _DESCRIPTION /
# _CITATION / _KWARGS_DESCRIPTION, which these were presumably named originally.
SCREAMING_SNAKE_CASE__ : Optional[int] = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ (datasets.Metric ):
    """Word error rate (WER) metric backed by ``jiwer.compute_measures``."""

    # BUG FIX: both methods were renamed to `_lowercase` (so the first was
    # shadowed by the second) and `_compute` declared its three parameters with
    # one shared name, a SyntaxError. `datasets.Metric` subclasses implement
    # `_info` and `_compute`; those contract names are restored here.
    def _info ( self ):
        """Return the metric metadata (features, citation, reference links)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ] , )

    def _compute ( self , predictions=None , references=None , concatenate_texts=False ):
        """Compute WER; with ``concatenate_texts`` score all texts as one batch."""
        if concatenate_texts:
            # jiwer takes (truth, hypothesis) and aggregates internally.
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                # errors = substitutions + deletions + insertions;
                # reference length = substitutions + deletions + hits.
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 311 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class A_ ( unittest.TestCase ):
    """Unit test for ``tf_top_k_top_p_filtering``.

    BUG FIX: every intermediate result was assigned to the same name ``__a``
    while the final assertions referenced undefined ``__SCREAMING_SNAKE_CASE``
    names, and the dtypes ``tf.floataa``/``tf.intaa`` do not exist in
    TensorFlow — restored to ``tf.float32``/``tf.int32``.
    """

    def _UpperCAmelCase ( self : Tuple ):
        # Logits for a batch of 2; the inline comments mark the entries that
        # must survive top_k=10 / top_p=0.6 / min_tokens_to_keep=4 filtering.
        logits = tf.convert_to_tensor(
            [
                [
                    8.2_22_09_91,  # 3rd highest value; idx. 0
                    -0.5_62_00_44,
                    5.23_22_97_52,
                    4.0_38_63_93,
                    -6.8_79_83_78,
                    -0.54_78_58_02,
                    -3.2_01_21_53,
                    2.92_77_71_76,
                    1.88_17_19_53,
                    7.35_34_12_76,  # 5th highest value; idx. 9
                    8.43_20_78_33,  # 2nd highest value; idx. 10
                    -9.85_71_18_36,
                    -5.96_20_92_36,
                    -1.13_03_91_61,
                    -7.1_11_52_94,
                    -0.8_36_96_33,
                    -5.3_18_64_08,
                    7.06_42_74_07,
                    0.81_36_93_44,
                    -0.82_02_38_17,
                    -5.9_17_97_96,
                    0.58_81_34_43,
                    -6.99_77_84_38,
                    4.71_55_11_89,
                    -0.18_77_16_37,
                    7.44_02_07_59,  # 4th highest value; idx. 25
                    9.38_45_09_87,  # 1st highest value; idx. 26
                    2.12_66_29_41,
                    -9.32_56_20_38,
                    2.35_65_25_22,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58_42_55_18,
                    4.53_13_92_38,
                    -5.57_51_04_64,
                    -6.28_03_06_99,
                    -7.19_52_95_03,
                    -4.02_12_25_51,
                    1.39_33_70_37,
                    -6.06_70_70_57,
                    1.59_48_05_17,
                    -9.64_31_19,
                    0.03_90_77_99,
                    0.67_23_17_62,
                    -8.88_20_67_26,
                    6.27_11_59_22,  # 4th highest value; idx. 13
                    2.28_52_07_23,
                    4.82_76_75_06,
                    4.30_42_13_68,
                    8.8_27_53_13,  # 2nd highest value; idx. 17
                    5.44_02_99_58,  # 5th highest value; idx. 18
                    -4.4_73_57_94,
                    7.38_57_95_36,  # 3rd highest value; idx. 20
                    -2.91_05_16_63,
                    2.61_94_60_77,
                    -2.5_67_47_62,
                    -9.48_95_93_02,
                    -4.02_92_26_45,
                    -1.35_41_69_18,
                    9.67_70_23_23,  # 1st highest value; idx. 27
                    -5.89_47_85_53,
                    1.85_37_04_67,
                ],  # cummulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        expected_non_filtered_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , )  # expected non filtered idx as noted above
        expected_non_filtered_values = tf.convert_to_tensor(
            [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.float32 , )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        # Filtered-out positions are set to -inf; keep only surviving logits.
        non_inf_output = output[output != -float("inf" )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float("inf" ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , expected_non_filtered_values , rtol=1E-12 )
        tf.debugging.assert_equal(non_inf_idx , expected_non_filtered_idx )
@require_tf
class A_ ( unittest.TestCase , a_ ):
    """TF generation integration tests (SavedModel export, sampling, kwargs).

    BUG FIX: the nested helper classes were all renamed to ``A_`` while the
    code instantiates ``DummyModel``/``CompleteSentenceTransformer``/
    ``FakeBart``/``FakeEncoder``; nested method names ``serving``/``call``
    (required by the SavedModel signature and Keras) had been mangled; and
    many locals were assigned to ``__a`` while later lines read undefined
    names. ``tf.intaa`` is restored to ``tf.int32``.
    NOTE(review): the four plain test methods still share the obfuscated name
    ``_UpperCAmelCase``, so only the last definition is visible to unittest;
    their original distinct ``test_*`` names could not be recovered here.
    """

    # Parameters consumed by the framework-agnostic GenerationIntegrationTestsMixin
    # (imported here under the obfuscated alias `a_`).
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__(self , model ):
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name="attention_mask" ),
                ) , jit_compile=True , )
            def serving(self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [1_02, 1_03]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size] ),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )["sequences"]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )

    @slow
    def _UpperCAmelCase ( self : Any ):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__(self , model ):
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="attention_mask" ),
                ) , jit_compile=True , )
            def serving(self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [1_02, 1_03]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]] ),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )["sequences"]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )

    @slow
    @require_tensorflow_text
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=tmp_dir )

            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
                def __init__(self ):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , "spiece.model" ) , "rb" ).read() )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )

                def call(self , inputs , *args , **kwargs ):
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        sentence = "Hello, my dog is cute and"
        input_ids = tokenizer(sentence , return_tensors="tf" )
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        eos_token_id = 6_38
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**input_ids , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [6_38, 1_98]
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**input_ids , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

    def _UpperCAmelCase ( self : Optional[int] ):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article , return_tensors="tf" ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
        output = bart_model.generate(input_ids ).numpy()

        class FakeBart(TFBartForConditionalGeneration ):
            def call(self , input_ids , foo=None , **kwargs ):
                return super().call(input_ids , **kwargs )

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
        fake_output = bart_model.generate(input_ids , foo="bar" ).numpy()
        self.assertTrue(np.array_equal(output , fake_output ) )

        class FakeEncoder(bart_model.model.encoder.__class__ ):
            def call(self , input_ids , **kwargs ):
                return super().call(input_ids , **kwargs )

        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo="bar" )
| 702 | import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of hidden layers for each published RWKV checkpoint size.
# BUG FIX: both dicts were bound to the same name SCREAMING_SNAKE_CASE (the
# first was immediately clobbered) while the converter below reads
# NUM_HIDDEN_LAYERS_MAPPING and HIDEN_SIZE_MAPPING — those names are restored.
NUM_HIDDEN_LAYERS_MAPPING = {
    """169M""": 12,
    """430M""": 24,
    """1B5""": 24,
    """3B""": 32,
    """7B""": 32,
    """14B""": 40,
}

# Hidden dimension for each checkpoint size. NOTE(review): the converter
# references this exact (misspelled) identifier, so the spelling is kept.
HIDEN_SIZE_MAPPING = {
    """169M""": 768,
    """430M""": 1024,
    """1B5""": 2048,
    """3B""": 2560,
    """7B""": 4096,
    """14B""": 5120,
}
def __A ( _A ):
    """Rename RWKV checkpoint keys in-place to the Hugging Face Rwkv layout.

    Args:
        _A: the raw checkpoint state dict (key -> tensor).

    Returns:
        The same dict with every key rewritten (all keys except
        ``head.weight`` are additionally prefixed with ``rwkv.``).
    """
    # BUG FIX: the body read undefined `state_dict`, `name`, and `weight`;
    # the parameter and loop variables are now used consistently.
    state_dict_keys = list(_A.keys() )
    for name in state_dict_keys:
        weight = _A.pop(name )
        # emb -> embedding
        if name.startswith("emb." ):
            name = name.replace("emb." , "embeddings." )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0" ):
            name = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att" , r"blocks.\1.attention" , name )
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn" , r"blocks.\1.feed_forward" , name )
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k" ):
            name = name.replace(".time_mix_k" , ".time_mix_key" )
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v" ):
            name = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r" ):
            name = name.replace(".time_mix_r" , ".time_mix_receptance" )
        # Everything except the LM head lives under the `rwkv.` submodule.
        if name != "head.weight":
            name = "rwkv." + name
        _A[name] = weight
    return _A
def __A ( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    """Convert a raw RWKV checkpoint from the Hub to the Hugging Face format.

    Args:
        repo_id: Hub repo holding the raw checkpoint.
        checkpoint_file: name of the checkpoint file inside that repo.
        output_dir: where the converted model/tokenizer/config are written.
        size: model size key ("169M", ... "14B"); inferred from the file name
            when not given.
        tokenizer_file: optional tokenizer JSON; defaults to GPT-NeoX-20B.
        push_to_hub: push the converted model under ``model_name`` when True.
        model_name: Hub id used when pushing.

    Raises:
        ValueError: when the size cannot be inferred / is unknown, or when
            ``push_to_hub`` is set without ``model_name``.
    """
    # BUG FIX: all seven parameters were declared with the single name `_A`
    # (a SyntaxError) and every local was assigned to `__a` while later lines
    # read the original names; both are restored here.
    # 1. Tokenizer
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer." )
        vocab_size = 5_0277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location="cpu" )
    # NOTE(review): the key-renaming helper earlier in this file was also
    # renamed to `__A` and is shadowed by this function, so its logic is
    # inlined here to keep this entry point self-contained.
    for name in list(state_dict.keys() ):
        weight = state_dict.pop(name )
        if name.startswith("emb." ):
            name = name.replace("emb." , "embeddings." )
        if name.startswith("blocks.0.ln0" ):
            name = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
        name = re.sub(r"blocks\.(\d+)\.att" , r"blocks.\1.attention" , name )
        name = re.sub(r"blocks\.(\d+)\.ffn" , r"blocks.\1.feed_forward" , name )
        if name.endswith(".time_mix_k" ):
            name = name.replace(".time_mix_k" , ".time_mix_key" )
        if name.endswith(".time_mix_v" ):
            name = name.replace(".time_mix_v" , ".time_mix_value" )
        if name.endswith(".time_mix_r" ):
            name = name.replace(".time_mix_r" , ".time_mix_receptance" )
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , "w" , encoding="utf-8" ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
            f.write(content )
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        # Re-save each shard with CPU-cloned tensors so shards do not share storage.
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub." )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size="2GB" )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    # CLI entry point for the RWKV checkpoint converter.
    # BUG FIX: the parser and parsed-args objects were assigned to
    # SCREAMING_SNAKE_CASE while the code read `parser`/`args`, and the final
    # call targeted the undefined name `convert_rmkv_checkpoint_to_hf_format`
    # — both functions in this module are named `__A`, and at module level
    # that name resolves to the conversion entry point (the later definition).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
    )
    parser.add_argument(
        """--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
    )
    parser.add_argument(
        """--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
    )
    parser.add_argument(
        """--tokenizer_file""",
        default=None,
        type=str,
        help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
    )
    parser.add_argument(
        """--size""",
        default=None,
        type=str,
        help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Push to the Hub the converted model.""",
    )
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help="""Name of the pushed model on the Hub, including the username / organization.""",
    )
    args = parser.parse_args()
    __A(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 525 | 0 |
"""simple docstring"""
from __future__ import annotations
class snake_case :
    """Singly linked-list node: holds a value and a pointer to the next node."""

    def __init__( self , _lowerCamelCase=None ):
        # BUG FIX: the original assigned the undefined name `data` instead of
        # the constructor argument, and never initialized `next`.
        self.data = _lowerCamelCase
        self.next = None

    def __repr__( self ):
        """Render the list from this node onward as `a->b->c`."""
        # BUG FIX: the original appended to / joined undefined locals.
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'''{temp.data}''' )
            temp = temp.next
        return "->".join(string_rep )
def lowerCAmelCase_ ( lowercase_ : list ):
    """Build a singly linked list of `snake_case` nodes and return the head.

    Raises:
        Exception: when the input list is empty.
    """
    # BUG FIX: the original never bound `current` (NameError) and lost the
    # head node on every iteration; the standard build loop is restored.
    if not lowercase_:
        raise Exception('''The Elements List is empty''' )
    head = snake_case(lowercase_[0] )
    current = head
    for i in range(1 , len(lowercase_ ) ):
        current.next = snake_case(lowercase_[i] )
        current = current.next
    return head
def lowerCAmelCase_ ( lowercase_ ):
    """Print the linked list's elements in reverse order (tail first)."""
    # BUG FIX: the original recursed through the undefined module-level name
    # `print_reverse` and tested `isinstance(x, x)`; every function in this
    # file shares the name `lowerCAmelCase_`, so a nested helper keeps the
    # recursion self-contained and the node-type check uses `snake_case`.
    def _print_reverse(node ):
        if node is not None and isinstance(node , snake_case ):
            _print_reverse(node.next )
            print(node.data )

    _print_reverse(lowercase_ )
def lowerCAmelCase_ ( ):
    '''simple docstring'''
    # Demo entry point: run doctests, build a sample list, print it forward
    # then in reverse.
    # NOTE(review): this calls `make_linked_list` / `print_reverse` and reads
    # `lowercase_`, none of which exist under those names in this module — the
    # helpers above were all renamed to `lowerCAmelCase_` and the result of
    # make_linked_list is discarded into `__SCREAMING_SNAKE_CASE` — so running
    # this raises NameError until the cross-function renames are undone.
    from doctest import testmod

    testmod()
    __SCREAMING_SNAKE_CASE : str = make_linked_list([14, 52, 14, 12, 43] )
    print('''Linked List:''' )
    print(lowercase_ )
    print('''Elements in Reverse:''' )
    print_reverse(lowercase_ )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module — the entry-point
    # function above was renamed to `lowerCAmelCase_` — so running this file
    # directly raises NameError.
    main()
| 674 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = TFAutoModel.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoModel.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForPreTraining.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForPreTraining.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Any ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = TFAutoModelForCausalLM.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForCausalLM.from_pretrained(
_lowerCamelCase , output_loading_info=_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = AutoModelForCausalLM.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained(
_lowerCamelCase , output_loading_info=_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Any = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelWithLMHead.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = TFAutoModelForMaskedLM.from_pretrained(
_lowerCamelCase , output_loading_info=_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = AutoModelForMaskedLM.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedLM.from_pretrained(
_lowerCamelCase , output_loading_info=_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self :int ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = TFAutoModelForSeqaSeqLM.from_pretrained(
_lowerCamelCase , output_loading_info=_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_lowerCamelCase , output_loading_info=_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = TFAutoModelForSequenceClassification.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = TFAutoModelForQuestionAnswering.from_pretrained(_lowerCamelCase , from_pt=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = AutoModelForQuestionAnswering.from_pretrained(_lowerCamelCase , from_tf=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_(self: Union[str, Any]):
    """Checks TF↔PT weight round-trip for an LM-head model and its parameter count.

    Fix: the original bound each loaded model to a throwaway local
    (``__SCREAMING_SNAKE_CASE``) while the assertions read ``model`` — a
    guaranteed NameError at runtime. The result is now actually bound to
    ``model``.  NOTE(review): ``_lowerCamelCase`` remains undefined in this
    scope (obfuscation residue); verify against the original test.
    """
    model = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase, from_pt=_lowerCamelCase)
    self.assertIsInstance(_lowerCamelCase, _lowerCamelCase)
    self.assertEqual(model.num_parameters(), 1_4_4_1_0)
    self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase), 1_4_4_1_0)

    model = AutoModelWithLMHead.from_pretrained(_lowerCamelCase, from_tf=_lowerCamelCase)
    self.assertIsInstance(_lowerCamelCase, _lowerCamelCase)
    self.assertEqual(model.num_parameters(), 1_4_4_1_0)
    self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase), 1_4_4_1_0)
def SCREAMING_SNAKE_CASE_(self: str):
    """Same TF↔PT LM-head round-trip/parameter-count check as above.

    Fix: the loaded models were bound to a throwaway local while the
    assertions read ``model`` (NameError at runtime); now bound to ``model``.
    NOTE(review): ``_lowerCamelCase`` remains undefined in this scope
    (obfuscation residue); verify against the original test.
    """
    model = TFAutoModelWithLMHead.from_pretrained(_lowerCamelCase, from_pt=_lowerCamelCase)
    self.assertIsInstance(_lowerCamelCase, _lowerCamelCase)
    self.assertEqual(model.num_parameters(), 1_4_4_1_0)
    self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase), 1_4_4_1_0)

    model = AutoModelWithLMHead.from_pretrained(_lowerCamelCase, from_tf=_lowerCamelCase)
    self.assertIsInstance(_lowerCamelCase, _lowerCamelCase)
    self.assertEqual(model.num_parameters(), 1_4_4_1_0)
    self.assertEqual(model.num_parameters(only_trainable=_lowerCamelCase), 1_4_4_1_0)
| 674 | 1 |
# Sample directed graphs for the strongly-connected-components code below,
# as adjacency lists (vertex -> list of outgoing neighbours).
# NOTE(review): both literals are bound to the same name, so the second
# assignment clobbers the first — looks like obfuscation residue; verify.
UpperCamelCase_ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
UpperCamelCase_ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def A(graph, vert, visited) -> list[int]:
    """Return the DFS post-order of the vertices reachable from ``vert``.

    Fixes two defects in the original: all three parameters were named
    ``__UpperCAmelCase`` (duplicate argument names — a SyntaxError) and the
    recursion went through an undefined global ``topology_sort``. The DFS is
    now a self-contained inner function.

    graph:   adjacency list, vertex -> list of neighbour vertices
    vert:    start vertex
    visited: mutable list of booleans, updated in place
    """

    def _dfs(v):
        visited[v] = True
        order = []
        for neighbour in graph[v]:
            if not visited[neighbour]:
                order += _dfs(neighbour)
        order.append(v)  # post-order: a vertex follows everything it reaches
        return order

    return _dfs(vert)
def A(reversed_graph, vert, visited) -> list[int]:
    """Collect the vertices of one strongly connected component.

    DFS over the reversed graph starting at ``vert``. Fixes the original's
    duplicate parameter names (a SyntaxError) and its recursion through the
    undefined global ``find_components``.

    reversed_graph: adjacency list of the reversed edges
    vert:           start vertex
    visited:        mutable list of booleans, updated in place
    """

    def _dfs(v):
        visited[v] = True
        component = [v]
        for neighbour in reversed_graph[v]:
            if not visited[neighbour]:
                component += _dfs(neighbour)
        return component

    return _dfs(vert)
def A(graph) -> list[list[int]]:
    """Kosaraju's algorithm: return the strongly connected components of ``graph``.

    ``graph`` is an adjacency list {vertex: [neighbours]} with vertices
    0..n-1. The original referenced undefined helpers (``topology_sort``,
    ``find_components``) and undefined locals (obfuscated assignment
    targets); this reconstruction is self-contained.
    """

    def _post_order(g, v, seen):
        # DFS finishing order on the forward graph.
        seen[v] = True
        order = []
        for neighbour in g[v]:
            if not seen[neighbour]:
                order += _post_order(g, neighbour, seen)
        order.append(v)
        return order

    def _component(rg, v, seen):
        # One SCC: DFS on the reversed graph.
        seen[v] = True
        component = [v]
        for neighbour in rg[v]:
            if not seen[neighbour]:
                component += _component(rg, neighbour, seen)
        return component

    n = len(graph)
    visited = n * [False]
    reversed_graph = {vert: [] for vert in range(n)}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += _post_order(graph, i, visited)

    components_list = []
    visited = n * [False]
    # Process vertices in reverse finishing order on the reversed graph.
    for i in range(n):
        vert = order[n - i - 1]
        if not visited[vert]:
            components_list.append(_component(reversed_graph, vert, visited))
    return components_list
| 561 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a_(_snake_case):
    """Configuration class mirroring ``transformers.BertConfig``.

    Restored from the obfuscated original, whose ``__init__`` declared every
    parameter as ``_lowercase`` (duplicate argument names — a SyntaxError)
    and assigned every attribute to a throwaway local, leaving the names the
    body reads (``vocab_size`` …) undefined. Parameter and attribute names
    are recovered from those body references; defaults are unchanged. The
    bogus ``Union[str, Any]`` annotation on the class attribute is dropped —
    ``Union`` is not imported in this file, so evaluating it raised NameError
    at class-creation time.
    """

    UpperCamelCase__ = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a_(_snake_case):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __a(self: str) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra per-choice axis.
        axes = (
            {0: "batch", 1: "choice", 2: "sequence"}
            if self.task == "multiple-choice"
            else {0: "batch", 1: "sequence"}
        )
        return OrderedDict(
            [
                ("input_ids", axes),
                ("attention_mask", axes),
                ("token_type_ids", axes),
            ]
        )
| 561 | 1 |
def __lowerCamelCase(start, finish):
    """Greedy activity selection: print and return a maximum set of
    mutually compatible activities.

    start:  start times, parallel to ``finish``
    finish: finish times, assumed sorted in ascending order

    Fixes the original's duplicate ``UpperCamelCase__`` parameter names
    (a SyntaxError); names are recovered from the body's own ``start[j]`` /
    ``finish[i]`` reads. Also returns the selected indices (the original
    only printed them and returned None — callers ignoring the return value
    are unaffected) and handles empty input instead of selecting index 0.
    """
    if not start:
        return []
    n = len(start)
    print('The following activities are selected:')
    # The first activity is always selected.
    i = 0
    selected = [0]
    print(i, end=',')
    # Select each later activity whose start is at or after the
    # finish time of the previously selected activity.
    for j in range(n):
        if start[j] >= finish[i]:
            print(j, end=',')
            selected.append(j)
            i = j
    return selected
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original referenced ``print_max_activities``, ``start`` and
    # ``finish`` — none of which exist under those names in this file (the
    # lists were bound to a repeatedly-rebound obfuscated name). The demo now
    # calls the selector defined above.
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    __lowerCamelCase(start, finish)
| 362 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class lowercase ( lowercase_ ):
    """Tests for RagRetriever over canonical HF, custom HF and legacy indexes.

    NOTE(review): throughout this class, results are bound to the throwaway
    local ``snake_case_`` while later statements read other names
    (``self.tmpdirname``, ``self.retrieval_vector_size``, ``dataset``,
    ``retriever``, ``vocab_tokens`` …), and many calls pass the undefined
    name ``snake_case`` — obfuscation residue. Verify each method against
    the original test file before trusting it to run.
    """

    # Setup: builds a temp dir with a minimal DPR vocab and BART vocab/merges.
    def a ( self ):
        snake_case_ = tempfile.mkdtemp()
        snake_case_ = 8
        # DPR tok
        snake_case_ = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        snake_case_ = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
        os.makedirs(snake_case , exist_ok=snake_case )
        snake_case_ = os.path.join(snake_case , DPR_VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        # BART tok
        snake_case_ = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        snake_case_ = dict(zip(snake_case , range(len(snake_case ) ) ) )
        snake_case_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        snake_case_ = {'unk_token': '<unk>'}
        snake_case_ = os.path.join(self.tmpdirname , 'bart_tokenizer' )
        os.makedirs(snake_case , exist_ok=snake_case )
        snake_case_ = os.path.join(snake_case , BART_VOCAB_FILES_NAMES['vocab_file'] )
        snake_case_ = os.path.join(snake_case , BART_VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(snake_case ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(snake_case ) )

    # DPR question-encoder tokenizer loaded from the temp dir.
    def a ( self ):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )

    # DPR context-encoder tokenizer loaded from the temp dir.
    def a ( self ):
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )

    # BART tokenizer loaded from the temp dir.
    def a ( self ):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )

    # Teardown: removes the temp dir.
    def a ( self ):
        shutil.rmtree(self.tmpdirname )

    # Two-row dummy dataset with a Flat inner-product FAISS index on 'embeddings'.
    def a ( self ):
        snake_case_ = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    # Retriever over the canonical HF index, with load_dataset patched to the dummy.
    def a ( self ):
        snake_case_ = self.get_dummy_dataset()
        snake_case_ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
            snake_case_ = dataset
            snake_case_ = RagRetriever(
                snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    # Retriever over a custom HF index, either serialized to disk or in memory.
    def a ( self , snake_case ):
        snake_case_ = self.get_dummy_dataset()
        snake_case_ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
        if from_disk:
            # Persist the dataset and FAISS index so the retriever reloads them.
            snake_case_ = os.path.join(self.tmpdirname , 'dataset' )
            snake_case_ = os.path.join(self.tmpdirname , 'index.faiss' )
            dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
            dataset.drop_index('embeddings' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
            del dataset
            snake_case_ = RagRetriever(
                snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            snake_case_ = RagRetriever(
                snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case ) , )
        return retriever

    # Retriever over the legacy index format (pickled passages + .dpr index files).
    def a ( self ):
        snake_case_ = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
        snake_case_ = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
        dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
        pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
        snake_case_ = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
        snake_case_ = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
        pickle.dump(snake_case , open(snake_case , 'wb' ) )
        snake_case_ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
        snake_case_ = RagRetriever(
            snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever

    # retrieve() over the canonical HF index: shapes, doc dict keys, inner-product order.
    def a ( self ):
        snake_case_ = 1
        snake_case_ = self.get_dummy_canonical_hf_index_retriever()
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ , snake_case_ , snake_case_ = retriever.retrieve(snake_case , n_docs=snake_case )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(snake_case ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , snake_case )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # save_pretrained/from_pretrained round-trip for the canonical retriever.
    def a ( self ):
        snake_case_ = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
                snake_case_ = self.get_dummy_dataset()
                retriever.save_pretrained(snake_case )
                snake_case_ = RagRetriever.from_pretrained(snake_case )
                self.assertIsInstance(snake_case , snake_case )
                snake_case_ = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
                snake_case_ = retriever.retrieve(snake_case , n_docs=1 )
                self.assertTrue(out is not None )

    # retrieve() over the in-memory custom HF index.
    def a ( self ):
        snake_case_ = 1
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case )
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ , snake_case_ , snake_case_ = retriever.retrieve(snake_case , n_docs=snake_case )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(snake_case ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , snake_case )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # save_pretrained/from_pretrained round-trip for the in-memory custom retriever.
    def a ( self ):
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(snake_case )
            snake_case_ = RagRetriever.from_pretrained(snake_case )
            self.assertIsInstance(snake_case , snake_case )
            snake_case_ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            snake_case_ = retriever.retrieve(snake_case , n_docs=1 )
            self.assertTrue(out is not None )

    # retrieve() over the custom HF index serialized to disk.
    def a ( self ):
        snake_case_ = 1
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case )
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ , snake_case_ , snake_case_ = retriever.retrieve(snake_case , n_docs=snake_case )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(snake_case ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , snake_case )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # save_pretrained/from_pretrained round-trip for the on-disk custom retriever.
    def a ( self ):
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(snake_case )
            snake_case_ = RagRetriever.from_pretrained(snake_case )
            self.assertIsInstance(snake_case , snake_case )
            snake_case_ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            snake_case_ = retriever.retrieve(snake_case , n_docs=1 )
            self.assertTrue(out is not None )

    # retrieve() over the legacy index: doc dicts carry only text/title.
    def a ( self ):
        snake_case_ = 1
        snake_case_ = self.get_dummy_legacy_index_retriever()
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ , snake_case_ , snake_case_ = retriever.retrieve(snake_case , n_docs=snake_case )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(snake_case ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['text'] ) , snake_case )
        self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # save_pretrained/from_pretrained round-trip for the legacy retriever.
    def a ( self ):
        snake_case_ = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(snake_case )
            snake_case_ = RagRetriever.from_pretrained(snake_case )
            self.assertIsInstance(snake_case , snake_case )
            snake_case_ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            snake_case_ = retriever.retrieve(snake_case , n_docs=1 )
            self.assertTrue(out is not None )

    # __call__ output types: numpy by default, torch tensors with return_tensors='pt'.
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def a ( self ):
        import torch

        snake_case_ = 1
        snake_case_ = self.get_dummy_canonical_hf_index_retriever()
        snake_case_ = [[5, 7], [10, 11]]
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ = retriever(snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case )
        snake_case_ , snake_case_ , snake_case_ = (
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(snake_case , snake_case )
        self.assertIsInstance(snake_case , snake_case )
        self.assertIsInstance(snake_case , np.ndarray )
        snake_case_ = retriever(
            snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case , return_tensors='pt' , )
        snake_case_ , snake_case_ , snake_case_ , snake_case_ = ( # noqa: F841
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
            out['doc_ids'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(snake_case , torch.Tensor )
        self.assertIsInstance(snake_case , torch.Tensor )
        self.assertIsInstance(snake_case , torch.Tensor )

    # With a context-encoder tokenizer set, the output gains tokenized-doc keys.
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def a ( self ):
        snake_case_ = self.get_dpr_ctx_encoder_tokenizer()
        snake_case_ = 1
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case )
        retriever.set_ctx_encoder_tokenizer(snake_case )
        snake_case_ = [[5, 7], [10, 11]]
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ = retriever(snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case )
        self.assertEqual(
            len(snake_case ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , snake_case ) # check for doc token related keys in dictionary.
| 362 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_A = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["DeiTFeatureExtractor"]
_A = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 228 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 228 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
__SCREAMING_SNAKE_CASE : Any = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
__SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r'''([a-z\d])([A-Z])''')
__SCREAMING_SNAKE_CASE : int = re.compile(r'''(?<!_)_(?!_)''')
__SCREAMING_SNAKE_CASE : int = re.compile(r'''(_{2,})''')
__SCREAMING_SNAKE_CASE : Dict = r'''^\w+(\.\w+)*$'''
__SCREAMING_SNAKE_CASE : Optional[Any] = r'''<>:/\|?*'''
def a_(UpperCamelCase_):
    """Convert a CamelCase string to snake_case.

    Fix: the module-level compiled patterns the original referenced
    (``_uppercase_uppercase_re`` / ``_lowercase_uppercase_re``) are never
    defined under those names in this file, so the body raised NameError;
    the patterns are applied locally instead.
    """
    # Break runs of capitals before a capitalized word, then any
    # lower/digit-to-upper boundary, and lowercase the result.
    result = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", UpperCamelCase_)
    result = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", result)
    return result.lower()
def a_(UpperCamelCase_):
    """Convert a snake_case string to CamelCase, preserving runs of 2+ underscores.

    Fix: the module-level compiled patterns the original referenced
    (``_single_underscore_re`` / ``_multiple_underscores_re``) are never
    defined under those names in this file, so the body raised NameError;
    the patterns are applied locally instead.
    """
    # Split on single underscores only (lookarounds keep '__' runs intact),
    # then keep multi-underscore runs as their own tokens via the group split.
    parts = re.split(r"(?<!_)_(?!_)", UpperCamelCase_)
    parts = [re.split(r"(_{2,})", n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")
def a_(UpperCamelCase_):
    """Return the snake_case filename prefix for a dataset name.

    Raises ValueError when given a path rather than a bare name. Fixes two
    defects: the error message read the undefined name ``name`` (a NameError
    while raising), and the body called the undefined
    ``camelcase_to_snakecase`` — the conversion is inlined instead.
    """
    if os.path.basename(UpperCamelCase_) != UpperCamelCase_:
        raise ValueError(f"Should be a dataset name, not a path: {UpperCamelCase_}")
    result = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", UpperCamelCase_)
    result = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", result)
    return result.lower()
def a_(name, split):
    """Return the filename prefix ``<snake_case(name)>-<split>``.

    Raises ValueError for path-like names or malformed split names. Fixes the
    original's duplicate ``UpperCamelCase_`` parameter names (a SyntaxError);
    the names ``name``/``split`` are recovered from the body's own reads.
    The undefined globals (``_split_re``, ``filename_prefix_for_name``) are
    replaced with a local pattern and inlined snake_case conversion.
    """
    _split_re = r"^\w+(\.\w+)*$"
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    prefix = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", name)
    prefix = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", prefix)
    return f"{prefix.lower()}-{split}"
def a_(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching every file of a dataset split in ``data_dir``.

    Fixes the original's duplicate ``UpperCamelCase_`` parameter names (a
    SyntaxError). The prefix logic is inlined (the sibling
    ``filename_prefix_for_split`` it called does not exist under that name in
    this file), duplicating the validation so this definition stands alone.
    """

    def _prefix(name, split_name):
        # '<snake_case(name)>-<split>' with the validation the original
        # intended to delegate to filename_prefix_for_split.
        if os.path.basename(name) != name:
            raise ValueError(f"Should be a dataset name, not a path: {name}")
        _split_re = r"^\w+(\.\w+)*$"
        if not re.match(_split_re, split_name):
            raise ValueError(f"Split name should match '{_split_re}'' but got '{split_name}'.")
        snake = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", name)
        snake = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", snake)
        return f"{snake.lower()}-{split_name}"

    prefix = _prefix(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def a_(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the list of data filenames for a dataset split under ``path``.

    With ``shard_lengths`` the names are numbered
    ``<prefix>-00000-of-000NN``; otherwise a single-element list is returned.
    Fixes the original's duplicate ``UpperCamelCase_`` parameter names (a
    SyntaxError). The prefix logic is inlined (the sibling
    ``filename_prefix_for_split`` it called does not exist under that name in
    this file), duplicating the validation so this definition stands alone.
    """

    def _prefix(name, split_name):
        if os.path.basename(name) != name:
            raise ValueError(f"Should be a dataset name, not a path: {name}")
        _split_re = r"^\w+(\.\w+)*$"
        if not re.match(_split_re, split_name):
            raise ValueError(f"Split name should match '{_split_re}'' but got '{split_name}'.")
        snake = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", name)
        snake = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", snake)
        return f"{snake.lower()}-{split_name}"

    prefix = os.path.join(path, _prefix(dataset_name, split))
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 452 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class __lowerCAmelCase ( lowercase ):
    """Image processor for BLIP-style models: optional RGB conversion, resize,
    rescale, normalize, then channel-format conversion.

    Restored from the obfuscated original, in which every ``__init__``
    parameter was named ``lowerCAmelCase`` (duplicate argument names — a
    SyntaxError), all four methods shared the single name ``_UpperCAmelCase``
    while ``preprocess`` called ``self.resize``/``self.rescale``/
    ``self.normalize``, and attribute assignments were bound to a throwaway
    local ``A_`` instead of ``self``. Parameter, attribute and method names
    are recovered from the names the bodies themselves read.
    """

    _UpperCAmelCase : Union[str, Any] =["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 3_84, "width": 3_84}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to the CLIP normalization statistics.
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (must contain explicit height/width)."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch; returns a BatchFeature with 'pixel_values'."""
        # Per-call arguments override the instance-level defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        # Parenthesized: the original's unparenthesized condition grouped as
        # `(do_resize and size is None) or (resample is None)`, which could
        # raise even when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
| 452 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Integration tests comparing XLM-RoBERTa hidden states to fairseq reference values.

    Fixes: the two test methods shared one name (the second silently replaced the
    first), every local was assigned to the same clobbered name, and the body
    referenced an undefined ``lowercase_``.
    """

    @slow
    def test_xlm_roberta_base(self):
        """xlm-roberta-base: check output shape and a slice of the last hidden dim."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # Reference values generated with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base'); xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        """xlm-roberta-large: check output shape and a slice of the last hidden dim."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # Reference values generated with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large'); xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of labels for each GLUE fine-tuning task, used below to size the
# classification head when converting a fine-tuned checkpoint.
# Fix: this dict was bound to an obfuscated name, but the conversion function
# reads it as GLUE_TASKS_NUM_LABELS.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
# Emit INFO-level progress messages from the transformers logger during conversion.
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint to PyTorch and save weights + config.

    Fixes: the original definition used four parameters with the same name
    (a SyntaxError) and was bound to ``_A`` while the ``__main__`` block calls
    ``convert_xlnet_checkpoint_to_pytorch``.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: Path to the XLNet config JSON file.
        pytorch_dump_folder_path: Directory where the PyTorch files are written.
        finetuning_task: Optional GLUE/SQuAD task name; selects the model head.
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


# Backward-compatible alias for the previous (obfuscated) name.
_A = convert_xlnet_checkpoint_to_pytorch
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the checkpoint conversion.
    # Fix: the parser and parsed args were both assigned to one clobbered name,
    # while the code below read the undefined names ``parser`` / ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
# Module logger. Fix: this was bound to the same obfuscated name as the dtype
# map below (which overwrote it), leaving the `logger` used by the class
# undefined.
logger = logging.get_logger(__name__)
# Maps ONNX Runtime element-type strings to the corresponding NumPy dtypes.
# Fix: the obfuscated values (np.inta, np.intaa, ...) do not exist on numpy
# and mapped several distinct ONNX types to the same attribute.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    """Thin wrapper around an `onnxruntime.InferenceSession` with HF-Hub-style
    save/load helpers.

    Fixes: the obfuscated class had a duplicate parameter name in ``__init__``
    (a SyntaxError), five distinct methods all named ``__a`` (each definition
    overwrote the previous), undefined ``snake_case__`` references, and it
    referred to itself as ``OnnxRuntimeModel`` (restored here as the class name).
    """

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        # Directory the model was loaded from; used by _save_pretrained.
        self.model_save_dir = kwargs.get("model_save_dir", None)
        # File name of the ONNX graph within model_save_dir.
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        """Run inference; all keyword arguments are converted to numpy arrays."""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # None -> return all model outputs.
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Create an `ort.InferenceSession` for the graph at `path`.

        Falls back to CPUExecutionProvider when no provider is given.
        """
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        """Copy the ONNX graph (and external weights, if any) into `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        """Public save entry point; creates `save_directory` and delegates."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        """Load from a local directory or download the graph from the Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = cls.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = cls.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        """Public load entry point; supports `repo@revision` model ids."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )


# Backward-compatible alias for the previous (obfuscated) class name.
lowerCAmelCase_ = OnnxRuntimeModel
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds tiny RegNet configs/inputs and checks Flax model output shapes.

    Fixes: the obfuscated ``__init__`` repeated one parameter name twelve times
    (a SyntaxError) and all five methods shared the name ``__snake_case``; the
    model-test class below instantiates this class as ``FlaxRegNetModelTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry of hidden_sizes.
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w); RegNet downsamples the spatial dims by 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model-tester checks specialised for RegNet.

    Fixes: the obfuscated base class name was undefined (restored to the
    imported ``FlaxModelTesterMixin``), the four class attributes were all
    assigned to one clobbered name, and every test method shared one name.
    """

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    # NOTE(review): attribute names restored from the upstream Flax RegNet
    # test; confirm against FlaxModelTesterMixin's expected flags.
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Covered by the ConfigTester checks invoked from test_config.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            # Embedding output plus one entry per stage.
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # JIT-compiled and eager paths must agree in arity and shapes.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests.

    Fix: this helper was bound to an obfuscated name, but the integration test
    below calls it as ``prepare_img``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# Backward-compatible alias for the previous (obfuscated) name.
lowerCamelCase = prepare_img
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of a pretrained Flax RegNet classifier."""

    @cached_property
    def default_image_processor(self):
        # None when vision deps are absent; the slow test below then fails fast.
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch/CUDA ops deterministic so pipeline outputs are reproducible in tests.
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Kandinsky 2.2 controlnet pipeline with tiny models.

    Fixes: the obfuscated base class name was undefined (restored to the
    imported ``PipelineTesterMixin``), the five class attributes all shared one
    name, every method was named ``_a``, and several call sites referenced the
    undefined name ``_a``.
    """

    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    # NOTE(review): the original assigned a bare False here; restored as the
    # mixin's XLA flag -- confirm against PipelineTesterMixin.
    test_xla_options = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU end-to-end test for the Kandinsky 2.2 controlnet pipeline.

    Fixes: ``tearDown`` was misnamed ``_a`` (so it never ran), ``torch.floataa``
    does not exist (restored to ``torch.float16``), and several call sites
    referenced the undefined name ``_a``.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the NLLB tokenizers, following the standard
# transformers __init__ pattern.
# Fixes: the structure dict and its entries were all assigned to one obfuscated
# name, so `_import_structure` (read by _LazyModule below) was never populated,
# and the _LazyModule result was not installed into sys.modules.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K); the function body below reads `T`.


def UpperCAmelCase_(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Return the built-in voltage (volts) of a p-n junction at temperature T.

    V_bi = (kT / q) * ln(N_d * N_a / n_i^2)

    Fixes: the original signature repeated one parameter name three times
    (a SyntaxError) while the body used donor_conc/acceptor_conc/intrinsic_conc,
    and the temperature constant was bound to a name the body never read.

    Args:
        donor_conc: donor concentration N_d (must exceed intrinsic_conc).
        acceptor_conc: acceptor concentration N_a (must exceed intrinsic_conc).
        intrinsic_conc: intrinsic carrier concentration n_i (> 0).

    Raises:
        ValueError: if any concentration is non-positive, or a doping
            concentration does not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            # Divide by the electron-volt value to convert joules to volts.
            / physical_constants["electron volt"][0]
        )
# Run the module doctests when executed as a script.
# Fix: removed extraction junk ("| 31 |") fused onto the last line, which made
# the file unparseable.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
# Emit INFO-level progress messages from the transformers logger.
logging.set_verbosity_info()
# Module-level logger (note: bound to an obfuscated name; nothing below reads it).
_SCREAMING_SNAKE_CASE = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    """Convert one timm LeViT checkpoint to a HF model and optionally save it.

    Fixes: the original signature repeated one parameter name five times
    (a SyntaxError), every local was assigned to one clobbered name, and
    ``pretrained=`` was passed an undefined name instead of True; the driver
    below calls this function as ``convert_weight_and_push``.

    Args:
        hidden_sizes: First-stage hidden size; selects the timm variant.
        name: Checkpoint name, e.g. "levit-128S".
        config: Target LevitConfig for the HF model.
        save_directory: Directory under which the converted model is saved.
        push_to_hub: When True, save the model and image processor locally.
    """
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            # The 128-size family has two variants, distinguished by the suffix.
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # The two state dicts list parameters in the same order; map by position.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: both models must produce identical logits.
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named LeViT checkpoint, or all of them when no name is given.

    Fixes: duplicate parameter names (SyntaxError), clobbered locals, and
    ``config`` being unbound at the return when a single model_name was passed.

    Returns:
        Tuple of (last config used, expected logits shape).
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    # Fetch the ImageNet-1k label mapping from the HF Hub.
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Config factory with the classification head pre-wired.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point.
    # Fix: parser/args were assigned to one clobbered name while the code below
    # read the undefined name ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# WGS-84 ellipsoid constants, in metres.
# FIX: all three constants were bound to one mangled name (`snake_case`) while
# the function below reads AXIS_A / AXIS_B / EQUATORIAL_RADIUS.
AXIS_A = 6378137.0  # semi-major (equatorial) axis
AXIS_B = 6356752.314245  # semi-minor (polar) axis
EQUATORIAL_RADIUS = 6378137


def UpperCamelCase_(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return Lambert's ellipsoidal distance between two points, in metres.

    Takes the two points as (latitude, longitude) pairs in degrees and corrects
    the spherical haversine central angle for the Earth's flattening.

    FIX: the original had duplicate (mangled) parameter names — a SyntaxError —
    and assigned every intermediate to one throwaway name while reading the
    real names; consistent names are restored below.
    """
    # Flattening of the ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 712 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( snake_case__ ,snake_case__ ,unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionXLImgaImgPipeline`` built from tiny dummy components.

    NOTE(review): the six ``a_`` assignments below all rebind the same class
    attribute, so only the last survives at class-creation time — they look
    like distinct test-mixin attributes (pipeline_class / params /
    required_optional_params / batch_params / image_params / ...); confirm
    against the pipeline test mixins and restore distinct names. Likewise the
    many ``_snake_case`` / ``_lowerCAmelCase`` tokens in the bodies are
    mangled placeholders (presumably ``True`` / ``None`` / the local being
    assigned) — confirm before relying on this class.
    """

    a_ = StableDiffusionXLImgaImgPipeline
    a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    a_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
    a_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Build the dict of tiny, deterministically seeded pipeline components."""
        # Re-seed before each randomly initialised module so weights are reproducible.
        torch.manual_seed(0 )
        _lowerCAmelCase : Optional[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=_snake_case , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        _lowerCAmelCase : Union[str, Any] = EulerDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
        torch.manual_seed(0 )
        _lowerCAmelCase : List[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        _lowerCAmelCase : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
        _lowerCAmelCase : Optional[Any] = CLIPTextModel(_snake_case )
        _lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_snake_case )
        _lowerCAmelCase : Optional[int] = CLIPTextModelWithProjection(_snake_case )
        _lowerCAmelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_snake_case )
        _lowerCAmelCase : str = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_a,
            "tokenizer_2": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case=0 ):
        """Build deterministic call kwargs (image, generator, prompt) for a given device/seed."""
        _lowerCAmelCase : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
        # Map the random tensor into the [0, 1] image range.
        _lowerCAmelCase : str = image / 2 + 0.5
        if str(_snake_case ).startswith("mps" ):
            # MPS does not support device-local torch.Generator objects.
            _lowerCAmelCase : str = torch.manual_seed(_snake_case )
        else:
            _lowerCAmelCase : str = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        _lowerCAmelCase : int = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def SCREAMING_SNAKE_CASE__ ( self ):
        """End-to-end smoke test: two Euler steps on CPU, check a 3x3 output slice."""
        _lowerCAmelCase : Tuple = "cpu"  # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : List[Any] = self.get_dummy_components()
        _lowerCAmelCase : str = StableDiffusionXLImgaImgPipeline(**_snake_case )
        _lowerCAmelCase : List[str] = sd_pipe.to(_snake_case )
        sd_pipe.set_progress_bar_config(disable=_snake_case )

        _lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_snake_case )
        _lowerCAmelCase : Union[str, Any] = sd_pipe(**_snake_case ).images
        _lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        # Reference slice recorded from a known-good run of this configuration.
        _lowerCAmelCase : Dict = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Attention-slicing output must closely match the regular forward pass."""
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Batched single-image inference must match unbatched inference."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Intentionally skipped mixin hook (no-op override)."""
        pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Passing pre-computed prompt embeddings must match passing raw prompts."""
        _lowerCAmelCase : int = self.get_dummy_components()
        _lowerCAmelCase : Optional[Any] = StableDiffusionXLImgaImgPipeline(**_snake_case )
        _lowerCAmelCase : int = sd_pipe.to(_snake_case )
        _lowerCAmelCase : Any = sd_pipe.to(_snake_case )
        sd_pipe.set_progress_bar_config(disable=_snake_case )

        # forward without prompt embeds
        _lowerCAmelCase : Dict = self.get_dummy_inputs(_snake_case )
        _lowerCAmelCase : str = 3 * ["this is a negative prompt"]
        _lowerCAmelCase : str = negative_prompt
        _lowerCAmelCase : Dict = 3 * [inputs["prompt"]]

        _lowerCAmelCase : Tuple = sd_pipe(**_snake_case )
        _lowerCAmelCase : Optional[Any] = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        _lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(_snake_case )
        _lowerCAmelCase : int = 3 * ["this is a negative prompt"]
        _lowerCAmelCase : List[str] = 3 * [inputs.pop("prompt" )]

        # NOTE(review): annotating a parenthesised tuple target is a SyntaxError
        # ("only single target (not tuple) can be annotated"); the four targets
        # were presumably prompt_embeds / negative_prompt_embeds /
        # pooled_prompt_embeds / negative_pooled_prompt_embeds — restore them.
        (
            (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) , (
                _lowerCAmelCase
            ) ,
        ) : Optional[Any] = sd_pipe.encode_prompt(_snake_case , negative_prompt=_snake_case )

        _lowerCAmelCase : int = sd_pipe(
            **_snake_case , prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , pooled_prompt_embeds=_snake_case , negative_pooled_prompt_embeds=_snake_case , )
        _lowerCAmelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    """Slow, GPU-only integration tests against ``stabilityai/stable-diffusion-2-base``.

    NOTE(review): all method names are mangled to ``SCREAMING_SNAKE_CASE__`` —
    later definitions shadow earlier ones. The first method is presumably
    ``tearDown`` and the second ``get_inputs`` (the last method calls
    ``self.get_inputs``); restore the intended names.
    """

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case="cpu" , _snake_case=torch.floataa , _snake_case=0 ):
        """Build deterministic latents and call kwargs for the integration run."""
        _lowerCAmelCase : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        # NumPy RNG (not torch) so the latents are identical across torch versions.
        _lowerCAmelCase : Any = np.random.RandomState(_snake_case ).standard_normal((1, 4, 64, 64) )
        _lowerCAmelCase : Tuple = torch.from_numpy(_snake_case ).to(device=_snake_case , dtype=_snake_case )
        _lowerCAmelCase : Optional[int] = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Three-step run of SD2-base; compare a 3x3 slice with recorded values."""
        _lowerCAmelCase : Dict = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
        pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )

        _lowerCAmelCase : int = self.get_inputs(_snake_case )
        _lowerCAmelCase : List[str] = pipe(**_snake_case ).images
        _lowerCAmelCase : List[str] = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        # Reference slice recorded from a known-good GPU run.
        _lowerCAmelCase : Any = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 587 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import structure for the speech-encoder-decoder model.
# FIX: every assignment below was bound to a mangled throwaway name while the
# final _LazyModule call read `_import_structure` (NameError); the canonical
# HF lazy-module pattern is restored, including the `sys.modules` swap.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

# Torch implementation is only importable when torch itself is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

# Flax implementation is only importable when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 390 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (HF utils logging, imported at the top of this file).
# FIX: both module-level names were bound to one mangled identifier; the
# conventional names are restored (nothing referenced the mangled ones).
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> config URL.
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowerCamelCase ( PretrainedConfig ):
    """Configuration class for a ViT-MSN model.

    FIX (three defects):
    - the class inherited from its own, not-yet-defined name (NameError at
      class creation); `PretrainedConfig` — imported at the top of this
      file — is the intended base for an HF model configuration;
    - `__init__` declared every parameter under one duplicated mangled name
      (a SyntaxError) while the body read the real names;
    - every argument was assigned to a throwaway local instead of `self.*`,
      so the config stored nothing.
    """

    # Identifier used by the AutoConfig machinery.
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 390 | 1 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Sample sentence used for smoke-testing the converted model.
SAMPLE_TEXT = "Hello world! cécé herlolip"

# FIX: the namedtuple was bound to a mangled throwaway name while the
# conversion function below constructs `BertAbsConfig(...)` (NameError).
# Hyper-parameter record mirroring the original BertAbs configuration.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
def __lowerCAmelCase(path_to_checkpoints, dump_path):
    """Convert an original BertAbs checkpoint into the HF ``BertAbsSummarizer`` format.

    Loads the authors' checkpoint, copies its weights into the re-implemented
    model, verifies both models produce (near-)identical outputs on a sample
    input, then saves the converted ``state_dict``.

    FIX: the original had duplicate parameter names and a duplicate-parameter
    lambda (both SyntaxErrors), and every local was bound to a mangled name
    while the code read the real names; consistent names are restored.

    Args:
        path_to_checkpoints: path to the official BertAbs PyTorch dump.
        dump_path: output-directory CLI argument (currently unused — the state
            dict is written to a hard-coded path below).
    """
    # Architecture hyper-parameters of the released CNN/DM checkpoint.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    # map_location lambda keeps tensors on CPU regardless of where they were saved.
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs (pad both sequences to the model's max length)
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


# Backward-compatible public name used by the __main__ block below.
convert_bertabs_checkpoints = __lowerCAmelCase
if __name__ == "__main__":
    # FIX: the parser and parsed args were bound to a mangled throwaway name
    # while the code below referenced `parser` / `args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch dump.""",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="""Path to the output PyTorch model.""",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 117 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
    """Tokenization tests for LayoutLM (slow and fast tokenizers).

    FIX: the four class attributes were all bound to one duplicated mangled
    name (only the last survived) and all five methods shared one name (only
    the last was collected); the canonical TokenizerTesterMixin attribute and
    method names are restored.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a tiny WordPiece vocab to the temp dir the mixin loads from."""
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Return a slow tokenizer loaded from the fixture vocab."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Sample (raw, expected-detokenized) pair used by the mixin."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenize a sample and check both the pieces and their vocab ids."""
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """Deliberately empty override of the mixin hook."""
        pass
| 117 | 1 |
'''simple docstring'''
# Supported energy units mapped to their value in joules (SI).
# FIX: the table was bound to a mangled name while the function below reads
# `ENERGY_CONVERSION`, and the function declared three identically named
# (mangled) parameters — a SyntaxError.
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def A_(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` between two energy units.

    Args:
        from_type: source unit name (a key of ``ENERGY_CONVERSION``).
        to_type: target unit name (a key of ``ENERGY_CONVERSION``).
        value: quantity expressed in ``from_type`` units.

    Returns:
        The quantity expressed in ``to_type`` units.

    Raises:
        ValueError: if either unit name is not in ``ENERGY_CONVERSION``.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert via joules: value -> joules -> target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class A__ :
    """Shared save/load round-trip tests for feature extractors.

    Concrete test classes are expected to provide ``feature_extraction_class``
    (the class under test) and ``feat_extract_dict`` (kwargs used to build it).

    NOTE(review): the four test methods below all carry the same (mangled)
    name ``__UpperCamelCase`` — later definitions shadow earlier ones, so only
    the last one is actually collected by the test runner. Several locals are
    likewise bound to mangled names while later lines read the real names
    (``feat_extract``, ``obj``, ``A_``); the intended names need restoring.
    """

    # NOTE(review): presumably a mixin default attribute — confirm its intended name.
    _UpperCAmelCase :Union[str, Any] = None

    def __UpperCamelCase( self ):
        """Round-trip the feature extractor through ``to_json_string``."""
        UpperCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCamelCase : Union[str, Any] = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , A_ )

    def __UpperCamelCase( self ):
        """Round-trip the feature extractor through ``to_json_file`` / ``from_json_file``."""
        UpperCamelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase : List[Any] = os.path.join(A_ , "feat_extract.json" )
            feat_extract_first.to_json_file(A_ )
            UpperCamelCase : str = self.feature_extraction_class.from_json_file(A_ )

        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def __UpperCamelCase( self ):
        """Round-trip through ``save_pretrained`` / ``from_pretrained`` and validate the JSON."""
        UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase : int = feat_extract_first.save_pretrained(A_ )[0]
            check_json_file_has_correct_format(A_ )
            UpperCamelCase : Optional[Any] = self.feature_extraction_class.from_pretrained(A_ )

        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def __UpperCamelCase( self ):
        """The feature extractor must be constructible with no arguments."""
        UpperCamelCase : Any = self.feature_extraction_class()
        self.assertIsNotNone(A_ )
| 629 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
a = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def _snake_case(config, items):
    """Collection hook body: mark every test lacking an explicit marker as ``unit``.

    FIX: the original declared two identically named parameters (a
    SyntaxError); the pytest hook signature ``(config, items)`` is restored.
    NOTE(review): several functions in this conftest share the mangled name
    ``_snake_case``; pytest discovers hooks by canonical names
    (``pytest_collection_modifyitems`` here) — the intended names need restoring.
    """
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def _snake_case(config):
    """Configure hook body: register the custom ``torchaudio_latest`` marker.

    FIX: the parameter was bound to a mangled name while the body read
    ``config`` (NameError at call time).
    """
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def _snake_case(tmp_path_factory, monkeypatch):
    """Redirect every `datasets` cache location into a per-session temp directory
    so tests never read from or pollute the user's real HF cache.

    FIX: the original declared two identically named parameters (a SyntaxError)
    and passed the previously defined function object as ``autouse``; the
    fixture signature and ``autouse=True`` are restored.
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def _snake_case():
    """Disable tqdm progress bars for the whole test session (quieter logs).

    FIX: ``autouse`` was accidentally passed the previously defined function
    object (truthy, but a mangling artifact); ``True`` is restored.
    """
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def _snake_case(monkeypatch):
    """Ensure test runs never increment the Hub download counters.

    FIX: the parameter was bound to a mangled name while the body read
    ``monkeypatch``, and the patched value was a mangling artifact —
    ``False`` (counters off in tests) is restored.
    """
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def _snake_case(monkeypatch):
    """Opt-in fixture: silence SQLAlchemy 2.0's deprecation "uber warning".

    FIX: the parameter was bound to a mangled name while the body read
    ``monkeypatch``, and the patched value was a mangling artifact —
    ``True`` (silence the warning) is restored.
    """
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 707 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# FIX: every module-level constant below was bound to the same mangled name
# `a`, while the tokenizer class reads `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP`, `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`
# and `SPIECE_UNDERLINE` (NameError at class creation / runtime).
logger = logging.get_logger(__name__)

# On-disk file name(s) of the tokenizer vocabulary.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Download locations of the pretrained SentencePiece models.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

# Maximum input lengths (in tokens) of the pretrained checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's meta-symbol marking the start of a word.
SPIECE_UNDERLINE = "▁"
class lowercase_ ( PreTrainedTokenizer ):
    """SentencePiece-based ALBERT tokenizer (slow implementation).

    FIX (structural defects in the original):
    - the base class was an undefined mangled name; ``PreTrainedTokenizer``
      (imported at the top of this file) is the intended base;
    - ``__init__`` declared every parameter under one duplicated mangled name
      (a SyntaxError) while the surrounding code read real keyword names;
    - every method shared the single mangled name ``lowerCAmelCase_`` so later
      definitions shadowed earlier ones, breaking the tokenizer contract —
      the canonical method names are restored;
    - ``__getstate__`` dropped the wrong binding; it now nulls the unpicklable
      ``sp_model`` entry as intended.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Number of pieces in the SentencePiece model."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before tokenizing."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string into SentencePiece pieces, splitting trailing digit-commas."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Re-split pieces like "9," so the comma becomes its own piece.
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Detokenize, decoding special tokens literally rather than via SentencePiece."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` for one sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The vocab file was loaded from memory; serialize the model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 505 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    """Adapter that exposes a ``timm`` model through the transformers backbone API.

    NOTE(review): local names in this class look machine-mangled.  Assignments bind
    ``A__`` while later statements read ``pretrained`` (in ``__init__``), ``use_timm``
    and ``config`` (in the classmethod), and ``hidden_states`` / ``feature_maps`` /
    ``output`` (in the forward pass) — none of those names are ever defined, so every
    method would raise NameError at runtime.  Several signatures also repeat the
    parameter name ``UpperCAmelCase__`` (positional plus ``*args``/``**kwargs``),
    which is a SyntaxError in Python.  Restore the original variable and parameter
    names before use.
    """

    # Name of the pixel input the backbone expects.
    UpperCAmelCase__ = '''pixel_values'''
    # Gradient checkpointing is not supported for timm models.
    UpperCAmelCase__ = False
    # Config class this backbone is built from.
    UpperCAmelCase__ = TimmBackboneConfig

    def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Tuple) ->Optional[Any]:
        """Validate the config, build the timm model and register its feature info."""
        requires_backends(self , '''timm''')
        super().__init__(UpperCAmelCase__)
        A__ = config
        # The config must name a model known to timm.
        if config.backbone is None:
            raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''')
        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
        # timm backbones select stages via out_indices, never out_features.
        if hasattr(UpperCAmelCase__ , '''out_features''') and config.out_features is not None:
            raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''')
        A__ = getattr(UpperCAmelCase__ , '''use_pretrained_backbone''' , UpperCAmelCase__)
        if pretrained is None:
            raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''')
        # We just take the final layer by default. This matches the default for the transformers models.
        A__ = config.out_indices if getattr(UpperCAmelCase__ , '''out_indices''' , UpperCAmelCase__) is not None else (-1,)
        A__ = timm.create_model(
            config.backbone , pretrained=UpperCAmelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase__ , **UpperCAmelCase__ , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        A__ = self._backbone.return_layers
        A__ = {layer['''module''']: str(UpperCAmelCase__) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(UpperCAmelCase__)

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Union[str, Any]:
        """Alternate constructor: assemble a TimmBackboneConfig from kwargs, then defer to ``_from_config``."""
        requires_backends(cls , ['''vision''', '''timm'''])
        from ...models.timm_backbone import TimmBackboneConfig

        A__ = kwargs.pop('''config''' , TimmBackboneConfig())
        A__ = kwargs.pop('''use_timm_backbone''' , UpperCAmelCase__)
        if not use_timm:
            raise ValueError('''use_timm_backbone must be True for timm backbones''')
        # Per-call overrides fall back to the values stored on the config.
        A__ = kwargs.pop('''num_channels''' , config.num_channels)
        A__ = kwargs.pop('''features_only''' , config.features_only)
        A__ = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone)
        A__ = kwargs.pop('''out_indices''' , config.out_indices)
        A__ = TimmBackboneConfig(
            backbone=UpperCAmelCase__ , num_channels=UpperCAmelCase__ , features_only=UpperCAmelCase__ , use_pretrained_backbone=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , )
        return super()._from_config(UpperCAmelCase__ , **UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[str]) ->int:
        """Intentionally a no-op: timm initialises its own weights."""
        pass

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : List[str]) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
        """Forward pass returning feature maps and, optionally, all hidden states."""
        A__ = return_dict if return_dict is not None else self.config.use_return_dict
        A__ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A__ = output_attentions if output_attentions is not None else self.config.output_attentions
        # timm models do not expose attention maps.
        if output_attentions:
            raise ValueError('''Cannot output attentions for timm backbones at the moment''')
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            A__ = self._all_layers
            A__ = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__)
            A__ = self._return_layers
            A__ = tuple(hidden_states[i] for i in self.out_indices)
        else:
            A__ = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__)
            A__ = None
            A__ = tuple(UpperCAmelCase__)
        A__ = tuple(UpperCAmelCase__) if hidden_states is not None else None
        if not return_dict:
            A__ = (feature_maps,)
            if output_hidden_states:
                A__ = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ , attentions=UpperCAmelCase__)
| 87 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    """Integration tests for ``JukeboxTokenizer`` against fixed expected token ids.

    NOTE(review): in both tests the local assignments bind ``A__`` (the tokenizer,
    the tokenized ids, then the expected tensors) while the assertions read
    ``tokens`` and ``EXPECTED_OUTPUT`` — names that are never defined here, so
    these tests would fail with NameError as written.  Restore the original
    local names before running.
    """

    # Tokenizer class under test.
    UpperCAmelCase__ = JukeboxTokenizer
    # Metadata fed to the tokenizer: artist/genre plus Shelley's "Ozymandias" as lyrics.
    UpperCAmelCase__ = {
        '''artist''': '''Zac Brown Band''',
        '''genres''': '''Country''',
        '''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
    }

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
        """Tokenize the metas with the 1b-lyrics checkpoint and compare to golden ids."""
        import torch

        A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''')
        A__ = tokenizer(**self.metas)['''input_ids''']
        # fmt: off
        A__ = [
            torch.tensor([[
                0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]]),
            torch.tensor([[0, 0, 0, 1_069, 11]]),
            torch.tensor([[0, 0, 0, 1_069, 11]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]:
        """Same check for the 5b-lyrics checkpoint (note the -1 metadata padding)."""
        import torch

        A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''')
        A__ = tokenizer(**self.metas)['''input_ids''']
        # fmt: off
        A__ = [
            torch.tensor([[
                0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]]),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
| 87 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the tiny sentencepiece fixture used to build the test tokenizer.
UpperCAmelCase_ : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
# en -> fi tokenizer configuration written next to the vocab in setUp().
UpperCAmelCase_ : Dict = {"""target_lang""": """fi""", """source_lang""": """en"""}
# A Marian language-code control token.
UpperCAmelCase_ : Optional[Any] = """>>zh<<"""
# Hub organisation prefix for pretrained Marian checkpoints.
# NOTE(review): every constant here rebinds the same mangled name
# ``UpperCAmelCase_`` even though the test class below reads ``ORG_NAME`` and a
# framework flag — the original distinct constant names (SAMPLE_VOCAB,
# mock_tokenizer_config, zh_code, ORG_NAME, FRAMEWORK) appear to have been lost.
UpperCAmelCase_ : Union[str, Any] = """Helsinki-NLP/"""
# Pick the tensor framework for return_tensors= based on what is installed.
if is_torch_available():
    UpperCAmelCase_ : Optional[Any] = """pt"""
elif is_tf_available():
    UpperCAmelCase_ : Tuple = """tf"""
else:
    UpperCAmelCase_ : List[Any] = """jax"""
@require_sentencepiece
class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Unit and integration tests for ``MarianTokenizer``.

    NOTE(review): local assignments in these tests bind ``SCREAMING_SNAKE_CASE_``
    while later statements read the original names (``vocab``, ``save_dir``,
    ``tokenizer``, ``batch``, ``batch_smaller`` ...), which are never defined —
    the methods would raise NameError at runtime.  Restore the original local
    names before running.
    """

    __UpperCamelCase = MarianTokenizer
    # Rust tokenizer does not exist for Marian.
    __UpperCamelCase = False
    __UpperCamelCase = True

    def _SCREAMING_SNAKE_CASE ( self : Any):
        """Write a tiny vocab + spm fixture pair into tmpdir for the tests."""
        super().setUp()
        SCREAMING_SNAKE_CASE_ : str = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        SCREAMING_SNAKE_CASE_ : List[str] = dict(zip(_a , range(len(_a))))
        SCREAMING_SNAKE_CASE_ : int = Path(self.tmpdirname)
        save_json(_a , save_dir / VOCAB_FILES_NAMES['''vocab'''])
        save_json(_a , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(_a , save_dir / VOCAB_FILES_NAMES['''source_spm'''])
            copyfile(_a , save_dir / VOCAB_FILES_NAMES['''target_spm'''])
        SCREAMING_SNAKE_CASE_ : Tuple = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def _SCREAMING_SNAKE_CASE ( self : List[str] , **lowercase_ : Optional[int]):
        """Reload the fixture tokenizer with the given kwargs."""
        return MarianTokenizer.from_pretrained(self.tmpdirname , **_a)

    def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Dict):
        """Provide (input, expected) text for the common tokenizer tests."""
        return (
            "This is a test",
            "This is a test",
        )

    def _SCREAMING_SNAKE_CASE ( self : int):
        """</s> must map to id 0 and back."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = '''</s>'''
        SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a) , _a)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a) , _a)

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
        """Check vocab ordering and size of the fixture vocab."""
        SCREAMING_SNAKE_CASE_ : Any = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '''</s>''')
        self.assertEqual(vocab_keys[1] , '''<unk>''')
        self.assertEqual(vocab_keys[-1] , '''<pad>''')
        self.assertEqual(len(_a) , 9)

    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        """Fixture vocab has exactly 9 entries."""
        self.assertEqual(self.get_tokenizer().vocab_size , 9)

    def _SCREAMING_SNAKE_CASE ( self : str):
        """Round-trip a pretrained en-de tokenizer through save/load."""
        SCREAMING_SNAKE_CASE_ : Tuple = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de')
        SCREAMING_SNAKE_CASE_ : Tuple = en_de_tokenizer(['''I am a small frog'''] , return_tensors=_a)
        self.assertIsInstance(_a , _a)
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(_a , batch.input_ids[0])
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(_a)
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [x.name for x in Path(_a).glob('''*''')]
        self.assertIn('''source.spm''' , _a)
        MarianTokenizer.from_pretrained(_a)

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        """Very long input must be truncated to the 512-token model max."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = tok(
            ['''I am a small frog''' * 1000, '''I am a small frog'''] , padding=_a , truncation=_a , return_tensors=_a)
        self.assertIsInstance(_a , _a)
        self.assertEqual(batch.input_ids.shape , (2, 512))

    def _SCREAMING_SNAKE_CASE ( self : int):
        """Without truncation, padding follows the longest sequence in the batch."""
        SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ : Any = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=_a , return_tensors=_a)
        self.assertIsInstance(_a , _a)
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10))

    @slow
    def _SCREAMING_SNAKE_CASE ( self : str):
        """Golden-output integration test against a pinned en-de revision."""
        # fmt: off
        SCREAMING_SNAKE_CASE_ : int = {'''input_ids''': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )

    def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        """Separate source/target vocabs: text vs text_target must use different ids."""
        SCREAMING_SNAKE_CASE_ : Tuple = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''')
        SCREAMING_SNAKE_CASE_ : Tuple = '''Tämä on testi'''
        SCREAMING_SNAKE_CASE_ : List[str] = '''This is a test'''
        SCREAMING_SNAKE_CASE_ : Dict = [76, 7, 2047, 2]
        SCREAMING_SNAKE_CASE_ : int = [69, 12, 11, 940, 2]
        SCREAMING_SNAKE_CASE_ : int = tokenizer(_a).input_ids
        self.assertListEqual(_a , _a)
        SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(text_target=_a).input_ids
        self.assertListEqual(_a , _a)
        SCREAMING_SNAKE_CASE_ : str = tokenizer.decode(_a , skip_special_tokens=_a)
        self.assertEqual(_a , _a)
| 721 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its Cartesian components.

    Fixes two defects in the previous definition: the signature repeated the
    parameter name ``__a`` three times (a SyntaxError) while the body read
    ``magnitude``/``angle``/``radian_mode``, and the function was named ``_A``
    although the ``__main__`` block below calls ``polar_force``.

    Args:
        magnitude: length of the force vector.
        angle: direction of the force; degrees unless ``radian_mode`` is True.
        radian_mode: interpret ``angle`` as radians when True.

    Returns:
        ``[fx, fy]`` — the x and y components of the force.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    # Default input is degrees: convert before taking cos/sin.
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces, location, eps: float = 10**-1) -> bool:
    """Check whether a set of coplanar forces produces no net moment.

    Fixes two defects in the previous definition: the signature repeated the
    parameter name ``__a`` (a SyntaxError) and the function was named ``_A``
    although the ``__main__`` block below calls ``in_static_equilibrium``.

    Args:
        forces: array of shape (n, 2), the (x, y) components of each force.
        location: array of shape (n, 2), the point each force is applied at.
        eps: tolerance below which the net moment counts as zero.

    Returns:
        True when the absolute value of the summed moments is below ``eps``.
    """
    # z-component of the cross product for each (force, location) pair.
    # The argument order only flips the sign, which abs() below cancels.
    moments = cross(forces, location)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    # NOTE(review): the assignments below bind the mangled name ``UpperCAmelCase_``
    # while the asserts read ``forces`` and ``location`` — those names are never
    # defined, so this self-check would raise NameError as written.  The original
    # distinct names (forces, location) appear to have been lost.
    UpperCAmelCase_ : Union[str, Any] = array(
        [
            polar_force(7_1_8.4, 180 - 30),
            polar_force(8_7_9.5_4, 45),
            polar_force(100, -90),
        ]
    )
    UpperCAmelCase_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    UpperCAmelCase_ : Union[str, Any] = array(
        [
            polar_force(30 * 9.8_1, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    UpperCAmelCase_ : Optional[int] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    UpperCAmelCase_ : List[str] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    UpperCAmelCase_ : Any = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    # Run the module's doctests as a final sanity check.
    import doctest

    doctest.testmod()
| 176 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase(module) -> None:
    """Freeze ``module``: disable gradient tracking for every parameter.

    Fixes two defects: the parameter was named ``_UpperCamelCase`` while the body
    iterated ``module.parameters()`` (NameError), and the loop assigned ``False``
    to a throwaway local instead of ``param.requires_grad``, making the function
    a no-op.

    Args:
        module: a ``torch.nn.Module`` whose weights should not be updated.
    """
    for param in module.parameters():
        # Detach the parameter from autograd so optimizers skip it.
        param.requires_grad = False
def _lowerCAmelCase( ) -> str:
    """Pick the best available torch device string: 'cuda', then 'mps', else 'cpu'.

    Fixes defects: the assignments bound a throwaway local while the code below
    read the undefined name ``device`` (NameError), and the return annotation
    referenced ``Tuple`` which is not imported in this module.

    Returns:
        One of ``"cuda"``, ``"mps"`` or ``"cpu"``.
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Prefer Apple's Metal backend when it is both available and compiled in.
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.' )
    return device
def _lowerCAmelCase(image) -> None:
    """Display ``image`` with matplotlib, hiding both axes.

    Fixes defects: the ``plt.imshow`` result was bound to a throwaway local while
    the next lines read the undefined name ``fig`` (NameError), and the image
    argument was passed to ``set_visible`` where a boolean is expected.

    Args:
        image: anything ``matplotlib.pyplot.imshow`` accepts (array or PIL image).
    """
    fig = plt.imshow(image)
    # Hide tick marks/labels so only the image itself is shown.
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def _lowerCAmelCase( ) -> str:
    """Return the current wall-clock time formatted as ``HH:MM:SS``.

    Fixes defects: the assignments bound a throwaway local while the return
    statement read the undefined name ``timestamp`` (NameError), and the
    return annotation said ``int`` although a string is returned.
    """
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S' )
    return timestamp
| 405 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
# Module logger.
# NOTE(review): every module-level constant below rebinds the same mangled name
# ``lowerCamelCase`` while the code further down reads ``MODEL_MODES``,
# ``arg_to_scheduler`` and ``arg_to_scheduler_choices`` — the original distinct
# names (logger, MODEL_MODES, arg_to_scheduler, arg_to_scheduler_choices,
# arg_to_scheduler_metavar) appear to have been lost.
lowerCamelCase : Optional[int] = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# Maps a task name to the Auto* class that instantiates the right model head.
lowerCamelCase : Union[str, Any] = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
lowerCamelCase : str = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
# Valid --lr_scheduler choices and the metavar string shown in --help.
lowerCamelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
lowerCamelCase : int = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class A__ ( pl.LightningModule ):
    """Base LightningModule shared by the example task scripts: wires a transformers
    config/tokenizer/model to optimizers, schedulers and dataloaders.

    NOTE(review): this class is machine-mangled.  ``__init__`` and several other
    methods repeat the parameter name ``_a`` (a SyntaxError), and assignments bind
    ``_SCREAMING_SNAKE_CASE`` while later statements read the original names
    (``config``, ``tokenizer``, ``model``, ``num_labels``, ``mode``, ``optimizer``,
    ``scheduler``, ``num_devices`` ...), which are never defined.  Restore the
    original parameter and local names before use.
    """

    def __init__( self : Optional[Any] , _a : argparse.Namespace , _a : Any=None , _a : int="base" , _a : Dict=None , _a : Tuple=None , _a : Any=None , **_a : List[Any] , ) -> Optional[int]:
        """Store hyperparameters and build config, tokenizer and model (unless provided)."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(_a )
        _SCREAMING_SNAKE_CASE =0
        _SCREAMING_SNAKE_CASE =Path(self.hparams.output_dir )
        _SCREAMING_SNAKE_CASE =self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            _SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_a , **_a , )
        else:
            _SCREAMING_SNAKE_CASE =config
        # Forward selected hparams straight into the model config when present.
        _SCREAMING_SNAKE_CASE =('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
        for p in extra_model_params:
            if getattr(self.hparams , _a , _a ):
                assert hasattr(self.config , _a ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config , _a , getattr(self.hparams , _a ) )
        if tokenizer is None:
            _SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_a , )
        else:
            _SCREAMING_SNAKE_CASE =tokenizer
        _SCREAMING_SNAKE_CASE =MODEL_MODES[mode]
        if model is None:
            _SCREAMING_SNAKE_CASE =self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_a , )
        else:
            _SCREAMING_SNAKE_CASE =model

    def A ( self : List[Any] , *_a : Optional[int] , **_a : int ) -> List[str]:
        """Reload model weights from a checkpoint path."""
        _SCREAMING_SNAKE_CASE =self.model_type.from_pretrained(*_a , **_a )

    def A ( self : List[Any] ) -> List[Any]:
        """Build the LR scheduler selected by --lr_scheduler."""
        _SCREAMING_SNAKE_CASE =arg_to_scheduler[self.hparams.lr_scheduler]
        _SCREAMING_SNAKE_CASE =get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        _SCREAMING_SNAKE_CASE ={'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return scheduler

    def A ( self : str ) -> List[str]:
        """Create the optimizer (AdamW or Adafactor) with weight-decay groups, plus its scheduler."""
        _SCREAMING_SNAKE_CASE =self.model
        # Bias and LayerNorm weights are conventionally excluded from weight decay.
        _SCREAMING_SNAKE_CASE =['bias', 'LayerNorm.weight']
        _SCREAMING_SNAKE_CASE =[
            {
                'params': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named paramters
                'weight_decay': self.hparams.weight_decay,
            },
            {
                'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                'weight_decay': 0.0,
            },
        ]
        if self.hparams.adafactor:
            _SCREAMING_SNAKE_CASE =Adafactor(
                _a , lr=self.hparams.learning_rate , scale_parameter=_a , relative_step=_a )
        else:
            _SCREAMING_SNAKE_CASE =AdamW(
                _a , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        _SCREAMING_SNAKE_CASE =optimizer
        _SCREAMING_SNAKE_CASE =self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def A ( self : Tuple , _a : Dict , _a : List[str] ) -> str:
        """Test step delegates to the validation step."""
        return self.validation_step(_a , _a )

    def A ( self : Dict , _a : str ) -> Dict:
        """Test epoch end delegates to validation aggregation."""
        return self.validation_end(_a )

    def A ( self : Union[str, Any] ) -> int:
        """Total number of optimizer steps across training (for scheduler sizing)."""
        _SCREAMING_SNAKE_CASE =max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        _SCREAMING_SNAKE_CASE =self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def A ( self : int , _a : Any ) -> Union[str, Any]:
        """Record the dataset size for the current stage (test vs fit)."""
        if stage == "test":
            _SCREAMING_SNAKE_CASE =len(self.test_dataloader().dataset )
        else:
            _SCREAMING_SNAKE_CASE =self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_a )
            _SCREAMING_SNAKE_CASE =len(self.train_dataloader().dataset )

    def A ( self : Union[str, Any] , _a : str , _a : int , _a : bool = False ) -> Optional[int]:
        """Tasks must provide their own dataloader factory."""
        raise NotImplementedError('You must implement this for your task' )

    def A ( self : Union[str, Any] ) -> Optional[int]:
        """Return the cached training dataloader built in setup()."""
        return self.train_loader

    def A ( self : str ) -> str:
        """Validation dataloader (never shuffled)."""
        return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_a )

    def A ( self : Tuple ) -> Any:
        """Test dataloader (never shuffled)."""
        return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_a )

    def A ( self : int , _a : Dict ) -> Optional[Any]:
        """Path of the cached-features file for a given split/model/seq-length."""
        return os.path.join(
            self.hparams.data_dir , 'cached_{}_{}_{}'.format(
                _a , list(filter(_a , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )

    @pl.utilities.rank_zero_only
    def A ( self : Optional[Any] , _a : Dict[str, Any] ) -> None:
        """On checkpoint save (rank zero only), also export model+tokenizer to best_tfmr."""
        _SCREAMING_SNAKE_CASE =self.output_dir.joinpath('best_tfmr' )
        _SCREAMING_SNAKE_CASE =self.step_count
        self.model.save_pretrained(_a )
        self.tokenizer.save_pretrained(_a )

    @staticmethod
    def A ( _a : Any , _a : Any ) -> List[str]:
        """Register the model/optimizer CLI arguments shared by all tasks."""
        parser.add_argument(
            '--model_name_or_path' , default=_a , type=_a , required=_a , help='Path to pretrained model or model identifier from huggingface.co/models' , )
        parser.add_argument(
            '--config_name' , default='' , type=_a , help='Pretrained config name or path if not the same as model_name' )
        parser.add_argument(
            '--tokenizer_name' , default=_a , type=_a , help='Pretrained tokenizer name or path if not the same as model_name' , )
        parser.add_argument(
            '--cache_dir' , default=str(Path(_a ).parent / 'test_run' / 'cache' ) , type=_a , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
        parser.add_argument(
            '--encoder_layerdrop' , type=_a , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--decoder_layerdrop' , type=_a , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--dropout' , type=_a , help='Dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--attention_dropout' , type=_a , help='Attention dropout probability (Optional). Goes into model.config' , )
        parser.add_argument('--learning_rate' , default=5e-5 , type=_a , help='The initial learning rate for Adam.' )
        parser.add_argument(
            '--lr_scheduler' , default='linear' , choices=_a , metavar=_a , type=_a , help='Learning rate scheduler' , )
        parser.add_argument('--weight_decay' , default=0.0 , type=_a , help='Weight decay if we apply some.' )
        parser.add_argument('--adam_epsilon' , default=1e-8 , type=_a , help='Epsilon for Adam optimizer.' )
        parser.add_argument('--warmup_steps' , default=0 , type=_a , help='Linear warmup over warmup_steps.' )
        parser.add_argument('--num_workers' , default=4 , type=_a , help='kwarg passed to DataLoader' )
        parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_a )
        parser.add_argument('--train_batch_size' , default=32 , type=_a )
        parser.add_argument('--eval_batch_size' , default=32 , type=_a )
        parser.add_argument('--adafactor' , action='store_true' )
class A__ ( pl.Callback ):
    """Lightning callback that initialises the RAG retriever on the master worker only.

    Fixes a defect: the hook's signature repeated the parameter name ``_a``
    (a SyntaxError) while its body read ``trainer`` and ``pl_module``.
    NOTE(review): the hook name itself looks mangled too (``A``) — confirm which
    pytorch-lightning hook this was meant to override.
    """

    def A ( self , trainer , pl_module ) -> None:
        """Run retriever initialisation exactly once, on global rank zero."""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class A__ ( pl.Callback ):
    """Debug callback: print the name of every RAG parameter that received no gradient.

    Fixes defects: the hook's signature repeated the parameter name ``_a``
    (a SyntaxError) and ``print(_a)`` printed an undefined name instead of the
    parameter name being inspected.
    NOTE(review): the hook name itself looks mangled (``A``) — confirm which
    pytorch-lightning hook this was meant to override.
    """

    def A ( self , trainer , pl_module ) -> None:
        """List parameters whose .grad is still None after backward."""
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class A__ ( pl.Callback ):
    """Logging callback: pushes per-group learning rates to the logger and prints /
    saves validation and test metrics.

    Fixes defects: all three hooks were named ``A`` (so the later definitions
    silently shadowed the earlier ones), their signatures repeated the parameter
    name ``_a`` (a SyntaxError), and the bodies read names (``lr_scheduler``,
    ``metrics``, ``od``) that the mangled assignments never bound.  Hook names
    restored per the upstream ``lightning_base.py``; confirm against that file.
    """

    def on_batch_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ) -> None:
        """Log the current learning rate of every optimizer parameter group."""
        lr_scheduler = trainer.lr_schedulers[0]['scheduler']
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )

    def on_validation_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ) -> None:
        """Print all validation metrics (rank zero only)."""
        rank_zero_info('***** Validation results *****' )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )

    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ) -> None:
        """Print all test metrics and persist them to <output_dir>/test_results.txt."""
        rank_zero_info('***** Test results *****' )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
        with open(output_test_results_file , 'w' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )
                    writer.write('{} = {}\n'.format(key , str(metrics[key] ) ) )
def _lowerCAmelCase(parser, root_dir) -> None:
    """Add the generic (model-agnostic) command-line arguments to ``parser``.

    Fixes: the original signature used the same name for both parameters (a
    SyntaxError) while the body read undefined names; argument ``type=`` values
    are restored (str/int/float per the defaults and help texts).

    Args:
        parser: the ``argparse.ArgumentParser`` to extend (mutated in place).
        root_dir: path whose parent is used to derive default output/data dirs.
    """
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def _lowerCAmelCase(
    model: "BaseTransformer",
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass a WandbLogger() here
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Build a ``pl.Trainer`` from ``args`` and (optionally) fit ``model``.

    Fixes: the original signature repeated the same parameter name (a
    SyntaxError), the body read undefined names, ``args.fpaa`` was a mangled
    ``args.fp16``, and the default ``extra_callbacks=[]`` was a shared mutable
    default that the function appended to.

    Returns:
        The configured ``pl.Trainer`` (after ``fit`` when ``args.do_train``).
    """
    # Copy instead of mutating the caller's (or a shared default) list.
    extra_callbacks = list(extra_callbacks) if extra_callbacks else []

    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        # NOTE(review): LoggingCallback / InitCallback are expected to be defined
        # in this module — confirm the class names.
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")

    return trainer
| 405 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging for these scripts: timestamped, PID-tagged records at INFO.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
)
# Module logger (obfuscated name kept because later code may reference it).
lowercase_ = logging.getLogger(__name__)
def __UpperCamelCase(folder_path) -> None:
    """Dump the current git commit/branch info to ``<folder_path>/git_log.json``.

    Fixes: the folder path was being passed as the boolean
    ``search_parent_directories`` flag (should be ``True``) and recorded as the
    ``repo_id`` (should be the repo itself); the ``-> List[Any]`` annotation
    referenced an unimported name and the function returns nothing.

    Requires running from inside a git checkout (parent dirs are searched).
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def __UpperCamelCase(params) -> None:
    """Handle single- and multi-GPU / multi-node setup.

    Reads the distributed-training environment (WORLD_SIZE, RANK, ...) and sets
    derived fields on ``params`` in place: local_rank, global_rank, world_size,
    n_nodes, node_id, n_gpu_per_node, is_master, multi_gpu, multi_node.

    Fix: the original assigned every value to one throwaway local instead of to
    ``params`` attributes, so the sanity asserts and all later reads failed.
    NOTE(review): several functions in this module share the name
    ``__UpperCamelCase`` — later defs shadow earlier ones; consider distinct names.
    """
    if params.n_gpu <= 0:
        # CPU-only run: nothing to initialize.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID : %i" % params.node_id)
    logger.info(PREFIX + "Local rank : %i" % params.local_rank)
    logger.info(PREFIX + "World size : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def __UpperCamelCase(args) -> None:
    """Seed the numpy and torch RNGs from ``args.seed`` (all CUDA devices too
    when ``args.n_gpu > 0``).

    Fix: the original parameter name did not match the ``args`` read in the body.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 707 |
from __future__ import annotations
import math
from collections.abc import Callable
def __UpperCamelCase(fnc, x_start: float, x_end: float, steps: int = 100) -> float:
    """Approximate the arc length of ``fnc`` between ``x_start`` and ``x_end``.

    The curve is approximated by ``steps`` straight segments whose lengths are
    summed with ``math.hypot``.

    Fixes: the original signature repeated one parameter name four times (a
    SyntaxError) and every local was assigned to the same name, so ``length``
    was never initialized before ``+=``.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    # Demo: estimate the length of f(x) = sin(10x) on [-10, 10] with increasing
    # step counts. Fixes: `i` was never initialized before the while loop, and
    # the loop referenced undefined names (`line_length`, `f`).
    def f(x: float) -> float:
        """Curve whose length we estimate: f(x) = sin(10 * x)."""
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {__UpperCamelCase(f, -10, 10, i)}")
        i *= 10
| 45 | 0 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase_(unittest.TestCase):
    """Tests for BarkProcessor (tokenizer wrapping + voice-preset handling).

    Fixes: every method shared the name ``_SCREAMING_SNAKE_CASE`` (so only the
    last survived), ``setUp`` bound locals instead of the ``self.*`` attributes
    the other tests read, and stray table residue was fused onto the last line.
    Method/attribute names were restored from the ``self.*`` references.
    """

    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # Flag values mirror BarkProcessor's internal tokenizer call —
        # TODO(review): confirm against the processor implementation.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for MgpstrTokenizer (character-level, lower-case only).

    Fixes: the base class name was undefined (restored to the imported
    ``TokenizerTesterMixin``), all methods shared the name ``UpperCamelCase``
    (so only the last survived), and locals were collapsed to one name.
    Test-method names were restored from the mixin contract — TODO(review):
    confirm the two skipped test names against the mixin.
    """

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 350 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds tiny Pegasus configs/inputs and shared cache-consistency checks.

    Fixes: ``__init__`` bound locals instead of ``self.*`` attributes, the three
    methods shared one name (only the last survived), and the class/attribute
    names were restored to match the in-file caller (``FlaxPegasusModelTester``,
    ``config_cls``, ``config_updates``).
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a tiny config plus matching encoder/decoder input dict."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding with an init_cache'd past must match uncached decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same cache check, but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the Pegasus model input dict, deriving padding masks when not given.

    Fixes: the original signature repeated one parameter name five times (a
    SyntaxError), the body read an undefined ``config``, ``np.inta`` is not a
    numpy attribute (restored to ``np.int8``), and the function is renamed to
    match its caller in this file.
    """
    if attention_mask is None:
        # Mask out padding positions of the encoder input.
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        # First decoder position (decoder start token) is always attended to;
        # the rest follows the padding of the shifted decoder input.
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class _lowerCAmelCase(FlaxModelTesterMixin, unittest.TestCase):
    """Model tests for Flax Pegasus.

    Fixes: the base class name was undefined (restored to the imported
    ``FlaxModelTesterMixin``), every test method shared the name ``snake_case__``
    (so only the last survived), and locals/attribute names were collapsed.
    Attribute and test names restored from the common test-mixin contract —
    TODO(review): confirm against the mixin.
    """

    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]

        tgt_text = [
            """California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
            """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 634 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. Fix: the original assigned both the logger and the archive map
# to the same name, so the logger was immediately clobbered; the final (map)
# binding keeps its original name for backward compatibility.
logger = logging.get_logger(__name__)

UpperCamelCase__ = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class _lowerCAmelCase(PretrainedConfig):
    """Configuration class for a SEW (Squeezed and Efficient Wav2vec) model.

    Fixes: the base class name was undefined (restored to the imported
    ``PretrainedConfig``), every ``__init__`` parameter shared the name
    ``lowercase`` (a SyntaxError), and all ``self.*`` assignments had been
    collapsed onto one local. Parameter names were restored from the attribute
    sequence and the validation/error messages below.
    """

    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three convolutional-layer descriptions must be parallel lists.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Overall stride of the feature extractor (product of all conv strides).
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 634 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class UpperCAmelCase_(BaseOutput):
    """Output class for the DeepFloyd IF pipelines.

    Fixes: the base class name was undefined (restored to the imported
    ``BaseOutput``) and all three fields shared one name, so only a single
    annotation survived. Field names restored from the standard pipeline
    output contract.
    """

    # Denoised images (list of PIL images or a numpy array).
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image NSFW flags when the safety checker ran, else None.
    nsfw_detected: Optional[List[bool]]
    # Per-image watermark flags when the watermark detector ran, else None.
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 470 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def A_(sentence: str) -> str:
    """Capitalize the first character of ``sentence`` when it is a lowercase letter.

    Fixes: the body read an undefined ``sentence``/``lower_to_upper`` and zipped
    the single parameter with itself instead of mapping ``ascii_lowercase`` to
    ``ascii_uppercase``.

    >>> A_("hello world")
    'Hello world'
    >>> A_("")
    ''
    """
    if not sentence:
        return ""
    # Map each lowercase ASCII letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 470 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): the constants below look like mangled DIFFUSERS_PATH / REPO_PATH
# (each rebinding shadows the previous `lowercase_`), and `DIFFUSERS_PATH` /
# `spec` are referenced under their original names — TODO restore the intended
# identifiers.
lowercase_: Optional[int] = 'src/diffusers'
lowercase_: List[str] = '.'
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_: List[str] = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_: Optional[int] = spec.loader.load_module()
def _lowercase ( line: str , indent: str ) -> bool:
    """Return True while `line` is still inside the block that starts with
    `indent`: it is indented at least as far, is blank, or is a dangling
    closing-parenthesis line of a signature.

    Bug fix (mangled names): the original `def` declared two parameters with
    the same name — a SyntaxError; upstream these are `line` and `indent`.
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , line) is not None
def _lowercase ( UpperCAmelCase_):
    """Locate `object_name` (e.g. "models.unet.UNet") inside the diffusers
    source tree and return its source code as a single string.

    NOTE(review): local names are mangled — the body reads `parts`, `i`,
    `module`, `lines`, `indent`, `line_index`, `start_index`, but every
    assignment targets `snake_case__`, so as written the reads are undefined.
    TODO restore coherent names.
    """
    snake_case__ : int = object_name.split(""".""")
    snake_case__ : int = 0
    # First let's find the module where our object lives.
    snake_case__ : Tuple = parts[i]
    while i < len(UpperCAmelCase_) and not os.path.isfile(os.path.join(UpperCAmelCase_ , F'{module}.py')):
        i += 1
        if i < len(UpperCAmelCase_):
            snake_case__ : List[str] = os.path.join(UpperCAmelCase_ , parts[i])
    if i >= len(UpperCAmelCase_):
        raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.')
    with open(os.path.join(UpperCAmelCase_ , F'{module}.py') , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
        snake_case__ : Any = f.readlines()
    # Now let's find the class / func in the code!
    snake_case__ : Optional[Any] = """"""
    snake_case__ : Dict = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(UpperCAmelCase_) and re.search(RF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index]) is None
        ):
            line_index += 1
        # NOTE(review): upstream steps the indent by four spaces here — TODO confirm.
        indent += " "
        line_index += 1
    if line_index >= len(UpperCAmelCase_):
        raise ValueError(F' {object_name} does not match any function or class in {module}.')
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    snake_case__ : Dict = line_index
    while line_index < len(UpperCAmelCase_) and _should_continue(lines[line_index] , UpperCAmelCase_):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    snake_case__ : int = lines[start_index:line_index]
    return "".join(UpperCAmelCase_)
# Matches lines of the form "# Copied from diffusers.module.Object [with X->Y]".
# NOTE(review): these three bindings shadow one another (all named `lowercase_`);
# the code below refers to them as `_re_copy_warning`, `_re_replace_pattern`
# and a fill pattern — TODO restore the intended names.
lowercase_: str = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
# Matches one "Old->New [option]" replacement directive.
lowercase_: List[str] = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
# Matches "<FILL ...>" placeholders left in copied docstrings.
lowercase_: Tuple = re.compile(r'<FILL\s+[^>]*>')
def _lowercase ( code ):
    """Return the leading whitespace of the first non-empty line of `code`
    (the empty string when every line is empty).

    Bug fix (mangled names): the parameter is the code snippet itself; the
    original body read undefined names `code` / `lines` while assigning to a
    placeholder.
    """
    lines = code.split("""\n""")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(R"""^(\s*)\S""" , lines[idx]).groups()[0]
    return ""
def _lowercase ( UpperCAmelCase_):
    """Run `black` (plus docstring styling) over a code snippet, preserving its
    indentation by temporarily wrapping indented code in a dummy class.

    NOTE(review): `black.TargetVersion.PYaa` looks mangled (upstream uses a
    concrete version such as PY37), and the reads `has_indent`, `code`,
    `result` do not match the mangled assignment targets — TODO confirm.
    """
    snake_case__ : str = len(get_indent(UpperCAmelCase_)) > 0
    if has_indent:
        snake_case__ : Optional[Any] = F'class Bla:\n{code}'
    snake_case__ : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase_)
    snake_case__ : Union[str, Any] = black.format_str(UpperCAmelCase_ , mode=UpperCAmelCase_)
    snake_case__ , snake_case__ : Union[str, Any] = style_docstrings_in_code(UpperCAmelCase_)
    return result[len("""class Bla:\n""") :] if has_indent else result
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_=False):
    """Check every `# Copied from` marker in one file and return a list of
    `[object_name, line_index]` entries for each stale copy, optionally
    rewriting the file in place.

    NOTE(review): both parameters carry the same mangled name (a SyntaxError);
    upstream these are `filename` and `overwrite`. All locals below are
    likewise mangled: assignments target `snake_case__` while the reads use
    the original names (`lines`, `diffs`, `line_index`, ...). TODO restore.
    """
    with open(UpperCAmelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
        snake_case__ : Dict = f.readlines()
    snake_case__ : Dict = []
    snake_case__ : str = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(UpperCAmelCase_):
        snake_case__ : Union[str, Any] = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        snake_case__ , snake_case__ , snake_case__ : Tuple = search.groups()
        snake_case__ : Optional[int] = find_code_in_diffusers(UpperCAmelCase_)
        snake_case__ : Any = get_indent(UpperCAmelCase_)
        snake_case__ : Any = line_index + 1 if indent == theoretical_indent else line_index + 2
        snake_case__ : Optional[int] = theoretical_indent
        snake_case__ : int = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        snake_case__ : Any = True
        while line_index < len(UpperCAmelCase_) and should_continue:
            line_index += 1
            if line_index >= len(UpperCAmelCase_):
                break
            snake_case__ : Any = lines[line_index]
            snake_case__ : Tuple = _should_continue(UpperCAmelCase_ , UpperCAmelCase_) and re.search(F'^{indent}# End copy' , UpperCAmelCase_) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        snake_case__ : Union[str, Any] = lines[start_index:line_index]
        snake_case__ : Any = """""".join(UpperCAmelCase_)
        # Remove any nested `Copied from` comments to avoid circular copies
        snake_case__ : Union[str, Any] = [line for line in theoretical_code.split("""\n""") if _re_copy_warning.search(UpperCAmelCase_) is None]
        snake_case__ : Optional[Any] = """\n""".join(UpperCAmelCase_)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(UpperCAmelCase_) > 0:
            snake_case__ : List[Any] = replace_pattern.replace("""with""" , """""").split(""",""")
            snake_case__ : int = [_re_replace_pattern.search(UpperCAmelCase_) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                snake_case__ , snake_case__ , snake_case__ : List[str] = pattern.groups()
                snake_case__ : str = re.sub(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
                if option.strip() == "all-casing":
                    snake_case__ : Tuple = re.sub(obja.lower() , obja.lower() , UpperCAmelCase_)
                    snake_case__ : Tuple = re.sub(obja.upper() , obja.upper() , UpperCAmelCase_)
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        snake_case__ : Optional[int] = blackify(lines[start_index - 1] + theoretical_code)
        snake_case__ : Dict = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                snake_case__ : str = lines[:start_index] + [theoretical_code] + lines[line_index:]
                snake_case__ : str = start_index + 1
    if overwrite and len(UpperCAmelCase_) > 0:
        # Warn the user a file has been modified.
        # NOTE(review): "(unknown)" looks like a mangled "{filename}" — TODO confirm.
        print(F'Detected changes, rewriting (unknown).')
        with open(UpperCAmelCase_ , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
            f.writelines(UpperCAmelCase_)
    return diffs
def _lowercase ( overwrite = False):
    """Check every Python file in the repository for `# Copied from`
    consistency.

    Args:
        overwrite: when True, rewrite out-of-sync copies in place instead of
            raising.

    Raises:
        Exception: listing every stale copy found (only when `overwrite` is
            False).
    """
    # Bug fix (mangled names): the original passed the `overwrite` flag both as
    # the directory to search and as `recursive=`; search the repo root
    # recursively instead, and restore `{filename}` in the diff messages.
    all_files = glob.glob(os.path.join(".", "**/*.py") , recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite)
        diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = """\n""".join(diffs)
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""")
if __name__ == "__main__":
    # CLI entry point: `--fix_and_overwrite` rewrites stale copies in place.
    # NOTE(review): bindings are mangled — `parser`, `args` and `check_copies`
    # are read below but the assignments target `lowercase_` — TODO restore.
    lowercase_: Optional[Any] = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    lowercase_: Optional[Any] = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 127 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure for the MLuke tokenizer: the real module is imported
# only when sentencepiece is installed (or during static type checking).
# NOTE(review): names are mangled — the dict and the module assignment are both
# bound to `lowercase_`, yet `_import_structure` is referenced at the bottom;
# upstream this is `_import_structure = {}` /
# `_import_structure["tokenization_mluke"] = ["MLukeTokenizer"]`. TODO restore.
lowercase_: Tuple = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_: List[str] = ['MLukeTokenizer']
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys
    lowercase_: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 127 | 1 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def a_ ( checkpoint_repo: str , pytorch_dump_folder_path: str ) -> None:
    """Convert an `efficient_mlm` RoBERTa-PreLayerNorm checkpoint from the Hub
    into the Transformers format and save model + tokenizer into
    `pytorch_dump_folder_path`.

    Bug fix (mangled names): the original `def` declared two parameters with
    the same name (a SyntaxError) and the body referenced the undefined
    placeholder `__lowerCamelCase` everywhere; reconstructed with the upstream
    parameter names — verify against the official conversion script.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='pytorch_model.bin' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        state_dict[tensor_key] = tensor_value
    # Presumably `None` here so the weights come solely from `state_dict` — TODO confirm.
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion script.
    # NOTE(review): bindings are mangled — `parser`, `args` and
    # `convert_roberta_prelayernorm_checkpoint_to_pytorch` are read below but
    # the assignments / definition use `a` and `a_` — TODO restore.
    a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint-repo',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    a = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 350 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __lowerCAmelCase ( text: str ) -> None:
    """Print the first-order and second-order Shannon entropies of `text`
    (rounded, in bits) followed by the difference between them.

    Bug fixes (mangled names): local assignments previously targeted an
    undefined placeholder; `math.loga` does not exist (entropy uses log base
    2); and both nested loops shared one variable name, so only doubled pairs
    ("aa", "bb", ...) were ever examined.

    Calls `analyze_text` — NOTE(review): in this (name-mangled) module that
    helper is defined under a different name; TODO confirm the binding.
    """
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(""" """ + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f"""{round(-1 * my_fir_sum ):.1f}""" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha0 in my_alphas:
        for cha1 in my_alphas:
            sequence = cha0 + cha1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f"""{round(-1 * my_sec_sum ):.1f}""" )
    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def __lowerCAmelCase ( text: str ) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in `text`.

    Returns `(single_char_strings, two_char_strings)` Counters. The last
    character is counted on its own line and a leading `" " + text[0]` pair is
    added, matching the companion entropy routine's expectations. Raises
    IndexError for the empty string (original behavior preserved).

    Bug fix (mangled names): assignments previously targeted an undefined
    placeholder while the reads used the real counter names.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def __lowerCAmelCase ( ) -> None:
    """Script entry point: run this module's doctests.

    The commented-out sample below can be used to exercise the entropy
    calculation manually.
    """
    import doctest
    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this (name-mangled)
    # module — the entry point above is also mangled; TODO confirm.
    main()
| 354 | 0 |
'''simple docstring'''
def UpperCamelCase_ ( head ):
    """Return True when the singly linked list starting at `head` reads the
    same forwards and backwards (O(1) extra space: reverse the second half).

    Bug fix (mangled names): every local assignment previously targeted one
    placeholder while the reads used `fast`/`slow`/`second`/`node`/`nxt`,
    which were undefined; reconstructed with coherent names. Note the second
    half of the list is reversed in place.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def UpperCamelCase_ ( head ):
    """Return True when the singly linked list starting at `head` is a
    palindrome, using a stack of the second half's values.

    Bug fix (mangled names): `fast`, `slow`, `cur` and `stack` were read but
    never bound (assignments targeted a placeholder); reconstructed with
    coherent names.
    """
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def UpperCamelCase_ ( head ):
    """Return True when the singly linked list starting at `head` is a
    palindrome, by mapping each value to its list of positions: in a
    palindrome every value's positions pair up symmetrically around the
    middle (each pair sums to `last_index`), with at most one odd-count
    value allowed (the middle element).

    Bug fix (mangled names): `d`, `pos`, `checksum`, `middle` and `step` were
    read but never bound (assignments targeted a placeholder); reconstructed
    with coherent names.
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 398 |
'''simple docstring'''
def UpperCamelCase_ ( number: int ) -> int:
    """Count the set bits of a non-negative integer using Brian Kernighan's
    algorithm: each `n &= n - 1` clears the lowest set bit, so the loop runs
    once per `1` bit instead of once per bit position.

    Raises:
        ValueError: if `number` is not a non-negative integer.
    """
    # Bug fix (mangled names): validate against `int`; the original called
    # isinstance(x, x), which raises TypeError for every valid input.
    if not isinstance(number , int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 398 | 1 |
"""simple docstring"""
def UpperCAmelCase ( arr ) -> int:
    """Return the minimum possible difference between the sums of two subsets
    that partition `arr` (classic "minimum partition" subset-sum DP).

    Bug fixes: locals were mangled to a single placeholder; the carry-over
    step read `dp[i][j - 1]` (a row-wise prefix OR that marks every state
    reachable, degenerating the result to `sum(arr) % 2`) instead of
    `dp[i - 1][j]`; and row 0 never marked sum 0 reachable, so empty input
    raised UnboundLocalError — it now returns 0.
    """
    n = len(arr )
    s = sum(arr )
    # dp[i][j] is True when some subset of the first i items sums to exactly j.
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(0 , n + 1 ):
        # The empty subset always reaches sum 0 (row 0 included — see docstring).
        dp[i][0] = True
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # Either skip item i (value from the row above) ...
            dp[i][j] = dp[i - 1][j]
            # ... or take it, when it fits in the target sum j.
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # The best split puts the largest reachable sum <= s/2 on one side.
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
"""simple docstring"""
def UpperCAmelCase ( n: int = 1_0_0_0 ) -> int:
    """Project Euler 57: over the first `n` expansions of the continued
    fraction for sqrt(2), count how many have a numerator with more digits
    than the denominator.

    Each expansion follows num' = num + 2*den, den' = num + den starting from
    1/1.

    Bug fix (mangled names): the running fraction's locals were unbound
    placeholders and the wrong value was appended; the iteration index is now
    recorded and the count returned.
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
    # Bug fix: the original printed an undefined name `solution`.
    print(f"""{UpperCAmelCase() = }""")
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
class _lowerCAmelCase ( a ):
    """Byte-level (ByT5-style) tokenizer: every UTF-8 byte is a token, offset
    by a small set of special tokens (pad/eos/unk) plus `extra_ids` sentinel
    tokens at the top of the vocabulary.

    NOTE(review): identifiers in this file are mangled — every `__init__`
    parameter shares the name `__UpperCAmelCase` (a SyntaxError; upstream:
    eos_token, unk_token, pad_token, extra_ids, additional_special_tokens,
    **kwargs), all methods are named `snake_case` (later definitions shadow
    earlier ones), and attribute assignments target `lowerCAmelCase__` while
    reads use the original names. The docstrings below describe the intended
    upstream behavior — TODO restore coherent names.
    """
    # Input names the model framework expects (upstream `model_input_names`).
    __magic_name__ :Optional[int] = ["""input_ids""", """attention_mask"""]
    def __init__( self , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase=1_2_5 , __UpperCAmelCase=None , **__UpperCAmelCase , ):
        '''Build the byte vocabulary and register the special tokens.'''
        # Add the default <extra_id_*> sentinels, or validate user-provided ones.
        if extra_ids > 0 and additional_special_tokens is None:
            lowerCAmelCase__ :List[Any] = [F"<extra_id_{i}>" for i in range(__UpperCAmelCase )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            lowerCAmelCase__ :Optional[Any] = len(set(filter(lambda __UpperCAmelCase : bool('extra_id' in str(__UpperCAmelCase ) ) , __UpperCAmelCase ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )
        # Normalize plain-string special tokens into AddedToken objects.
        lowerCAmelCase__ :Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
        lowerCAmelCase__ :int = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
        lowerCAmelCase__ :List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
        super().__init__(
            eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , extra_ids=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
        lowerCAmelCase__ :str = extra_ids
        lowerCAmelCase__ :str = 2**8 # utf is 8 bits
        # define special tokens dict
        lowerCAmelCase__ :Dict[int, str] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        lowerCAmelCase__ :Tuple = len(self.special_tokens_encoder )
        lowerCAmelCase__ :List[str] = len(__UpperCAmelCase )
        # Map each additional special token to an id at the top of the vocabulary.
        for i, token in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ :str = self.vocab_size + i - n
        lowerCAmelCase__ :Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def snake_case ( self ):
        '''Vocabulary size: 256 byte values + special tokens + extra ids.'''
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
        '''Return a 0/1 mask marking special tokens (upstream get_special_tokens_mask).'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(__UpperCAmelCase )) + [1]
        return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
    def snake_case ( self , __UpperCAmelCase ):
        '''Append the EOS id unless it is already present (warns on duplicates).'''
        if len(__UpperCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
        '''Token-type ids: this model does not use them, so return all zeros.'''
        lowerCAmelCase__ :int = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
        '''Build model inputs by appending EOS to each sequence and concatenating.'''
        lowerCAmelCase__ :Any = self._add_eos_if_not_present(__UpperCAmelCase )
        if token_ids_a is None:
            return token_ids_a
        else:
            lowerCAmelCase__ :List[str] = self._add_eos_if_not_present(__UpperCAmelCase )
            return token_ids_a + token_ids_a
    def snake_case ( self , __UpperCAmelCase ):
        '''Tokenize a string into a list of single-byte characters (UTF-8).'''
        lowerCAmelCase__ :Tuple = [chr(__UpperCAmelCase ) for i in text.encode('utf-8' )]
        return tokens
    def snake_case ( self , __UpperCAmelCase ):
        '''Convert a token to its id: specials first, then raw byte + offset.'''
        if token in self.special_tokens_encoder:
            lowerCAmelCase__ :Optional[int] = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            lowerCAmelCase__ :Optional[int] = self.added_tokens_encoder[token]
        elif len(__UpperCAmelCase ) != 1:
            lowerCAmelCase__ :Optional[Any] = self.unk_token_id
        else:
            lowerCAmelCase__ :Optional[int] = ord(__UpperCAmelCase ) + self._num_special_tokens
        return token_id
    def snake_case ( self , __UpperCAmelCase ):
        '''Convert an id back to its token (special string or byte character).'''
        if index in self.special_tokens_decoder:
            lowerCAmelCase__ :int = self.special_tokens_decoder[index]
        else:
            lowerCAmelCase__ :Any = chr(index - self._num_special_tokens )
        return token
    def snake_case ( self , __UpperCAmelCase ):
        '''Join byte tokens (and special-token strings) back into a string.'''
        lowerCAmelCase__ :Optional[int] = B''
        for token in tokens:
            if token in self.special_tokens_decoder:
                lowerCAmelCase__ :Optional[int] = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                lowerCAmelCase__ :List[str] = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                lowerCAmelCase__ :Optional[Any] = token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                lowerCAmelCase__ :List[Any] = token.encode('utf-8' )
            else:
                lowerCAmelCase__ :Tuple = bytes([ord(__UpperCAmelCase )] )
            bstring += tok_string
        lowerCAmelCase__ :Dict = bstring.decode('utf-8' , errors='ignore' )
        return string
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
        '''A byte tokenizer has no vocabulary file to save — return an empty tuple.'''
        return ()
| 560 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__A = 3
def __A (p_val: int ) ->int:
    """Return a random primitive-root candidate g modulo the prime `p_val`.

    Candidates with g^2 == 1 (mod p) or g^p == 1 (mod p) are rejected — the
    classic ElGamal key-generation check.

    Bug fix (mangled names): the original drew a random candidate into a
    placeholder and then tested the modulus against itself; the candidate `g`
    is now drawn, tested, and returned.
    """
    print('Generating primitive root of p' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def __A (key_size: int ) ->tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal (public_key, private_key) pair of `key_size` bits.

    public_key = (key_size, g, g^-d mod p ... via find_mod_inverse, p) and
    private_key = (key_size, d), following the classic construction.

    Bug fix (mangled names): the prime `p` must feed the primitive-root draw,
    the private exponent range and the modular inverse; the original passed
    `key_size` everywhere and bound every result to a placeholder.
    """
    print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_a = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_a2 = cryptomath.find_mod_inverse(pow(e_a , d , p ) , p )
    public_key = (key_size, e_a, e_a2, p)
    private_key = (key_size, d)
    return public_key, private_key
def __A (name: str , key_size: int ) ->None:
    """Generate a key pair and write it to `{name}_pubkey.txt` and
    `{name}_privkey.txt`, refusing to overwrite existing files.

    Bug fix (mangled names): the original `def` declared two parameters with
    the same name (a SyntaxError) and read unbound `public_key`/`private_key`;
    reconstructed with the upstream `name`/`key_size` parameters.
    """
    if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
        print('\nWARNING:' )
        print(
            F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F"\nWriting public key to file {name}_pubkey.txt..." )
    with open(F"{name}_pubkey.txt" , 'w' ) as fo:
        fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
    print(F"Writing private key to file {name}_privkey.txt..." )
    with open(F"{name}_privkey.txt" , 'w' ) as fo:
        fo.write(F"{private_key[0]},{private_key[1]}" )
def __A () ->None:
    """Generate the 2048-bit ElGamal key pair files under the name 'elgamal'."""
    print('Making key files...' )
    # NOTE(review): `make_key_files` is not defined under that name in this
    # (name-mangled) module — the generator above is also called `__A`; TODO confirm.
    make_key_files('elgamal' , 2048 )
    print('Key files generation successful' )
if __name__ == "__main__":
    # NOTE(review): `main` is likewise a mangled reference — TODO confirm.
    main()
| 560 | 1 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __lowerCAmelCase :
    """Abstract base for generation streamers: concrete subclasses receive new
    token tensors via the first method and an end-of-stream signal via the
    second.

    NOTE(review): both methods carry the same mangled name `_UpperCAmelCase`
    (upstream `put` / `end`), so the second definition shadows the first —
    TODO restore the intended names.
    """
    def _UpperCAmelCase ( self : Any , lowerCAmelCase : Dict ):
        raise NotImplementedError()
    def _UpperCAmelCase ( self : Optional[Any] ):
        raise NotImplementedError()
class __lowerCAmelCase ( __UpperCamelCase ):
    """Streamer that decodes generated tokens incrementally and emits text as
    soon as it is safely complete (upstream `TextStreamer`).

    NOTE(review): identifiers are mangled — `__init__`'s parameters share one
    name (a SyntaxError; upstream: tokenizer, skip_prompt, **decode_kwargs),
    all hook methods are named `_UpperCAmelCase` (upstream: put, end,
    on_finalized_text, _is_chinese_char; later definitions shadow earlier
    ones), and the attribute assignments target the placeholder `A_` while
    reads use the real names. TODO restore coherent names.
    """
    def __init__( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] = False , **lowerCAmelCase : Any ):
        A_ = tokenizer
        A_ = skip_prompt
        A_ = decode_kwargs
        # variables used in the streaming process
        A_ = []
        A_ = 0
        A_ = True
    def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int ):
        '''Receive a new token-id tensor, decode the cache, and emit any text
        that is safely complete (upstream `put`).'''
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1" )
        elif len(value.shape ) > 1:
            A_ = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            A_ = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        A_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n" ):
            A_ = text[self.print_len :]
            A_ = []
            A_ = 0
        # If the last token is a CJK character, we print the characters.
        elif len(lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            A_ = text[self.print_len :]
            self.print_len += len(lowerCAmelCase )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            A_ = text[self.print_len : text.rfind(" " ) + 1]
            self.print_len += len(lowerCAmelCase )
        self.on_finalized_text(lowerCAmelCase )
    def _UpperCAmelCase ( self : Dict ):
        '''Flush whatever remains in the cache and signal end-of-stream (upstream `end`).'''
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            A_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            A_ = text[self.print_len :]
            A_ = []
            A_ = 0
        else:
            A_ = ""
        A_ = True
        self.on_finalized_text(lowerCAmelCase , stream_end=lowerCAmelCase )
    def _UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] = False ):
        '''Print the finalized text chunk (upstream `on_finalized_text`).'''
        print(lowerCAmelCase , flush=lowerCAmelCase , end="" if not stream_end else None )
    def _UpperCAmelCase ( self : List[str] , lowerCAmelCase : Tuple ):
        '''Return True when codepoint `cp` lies in a CJK block (upstream `_is_chinese_char`).'''
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF) #
            or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
            or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
            or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
            or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
        ): #
            return True
        return False
class __lowerCAmelCase ( __UpperCamelCase ):
    """Streamer variant that pushes decoded text onto a queue so another
    thread can consume it by iterating (upstream `TextIteratorStreamer`).

    NOTE(review): identifiers are mangled — `__init__`'s parameters share
    names (a SyntaxError; upstream: tokenizer, skip_prompt, timeout,
    **decode_kwargs) and the attribute assignments target the placeholder
    `A_` (upstream: text_queue, stop_signal=None, timeout). TODO restore.
    """
    def __init__( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] = False , lowerCAmelCase : List[str] = None , **lowerCAmelCase : Optional[Any] ):
        super().__init__(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
        A_ = Queue()
        A_ = None
        A_ = timeout
    def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Any = False ):
        '''Enqueue finalized text; enqueue the stop signal at stream end.'''
        self.text_queue.put(lowerCAmelCase , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
    def __iter__( self : Optional[Any] ):
        return self
    def _UpperCAmelCase ( self : List[str] ):
        '''Blocking next(): raise StopIteration on the stop signal (upstream `__next__`).'''
        A_ = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 452 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ['''pixel_values''']
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
_snake_case = size if size is not None else {"shortest_edge": 256}
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_snake_case = crop_size if crop_size is not None else {"height": 224, "width": 224}
_snake_case = get_size_dict(lowerCamelCase )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_snake_case = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
        # Full preprocessing pipeline: resize -> center-crop -> rescale ->
        # normalize -> channel-format, returning a BatchFeature.
        # NOTE(review): all parameters share the mangled name `lowerCamelCase`
        # (duplicate argument names are a SyntaxError) and the body reads names
        # (`do_resize`, `size`, `images`, `resample`, ...) that are never
        # bound — recover the per-parameter names before attempting to run.
        # Resolve each option, falling back to the instance-level default.
        _snake_case = do_resize if do_resize is not None else self.do_resize
        _snake_case = size if size is not None else self.size
        _snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
        _snake_case = resample if resample is not None else self.resample
        _snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
        _snake_case = crop_size if crop_size is not None else self.crop_size
        _snake_case = get_size_dict(lowerCamelCase )
        _snake_case = do_rescale if do_rescale is not None else self.do_rescale
        _snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
        _snake_case = do_normalize if do_normalize is not None else self.do_normalize
        _snake_case = image_mean if image_mean is not None else self.image_mean
        _snake_case = image_std if image_std is not None else self.image_std
        # Accept a single image or a batch; always work on a list.
        _snake_case = make_list_of_images(lowerCamelCase )
        if not valid_images(lowerCamelCase ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Validate that every enabled step has the arguments it needs.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        _snake_case = [to_numpy_array(lowerCamelCase ) for image in images]
        if do_resize:
            _snake_case = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
        if do_center_crop:
            _snake_case = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
        if do_rescale:
            _snake_case = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
        if do_normalize:
            _snake_case = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
        # Convert to the requested channel-dimension layout and wrap up.
        _snake_case = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
        _snake_case = {"pixel_values": images}
        return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 672 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __lowercase , unittest.TestCase ):
    """LayoutLM tokenizer test case (slow + fast tokenizers).

    NOTE(review): this block is machine-mangled — the base class
    ``__lowercase`` is never defined, the four class attributes all rebind the
    same name ``UpperCAmelCase__`` (only the last assignment survives), and
    all five methods share the name ``_lowercase``, so each definition shadows
    the previous one.  Several bodies also read names (``vocab_tokens``,
    ``self.vocab_file``, ``tokenizer``, ``input_text``) that are never bound.
    Restore the intended distinct names before trusting these tests.
    """

    UpperCAmelCase__ = LayoutLMTokenizer
    UpperCAmelCase__ = LayoutLMTokenizerFast
    UpperCAmelCase__ = True
    UpperCAmelCase__ = True

    def _lowercase (self ):
        """Write a tiny WordPiece vocab file into the temp dir."""
        super().setUp()
        SCREAMING_SNAKE_CASE_ = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        # NOTE(review): the path is computed but never stored on `self`, yet
        # the next statement reads `self.vocab_file` — mangled binding.
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
        """Instantiate a slow tokenizer from the temp dir (kwargs forwarded)."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
        """Return a fixed (raw, expected) text pair for round-trip checks."""
        SCREAMING_SNAKE_CASE_ = '''UNwant\u00E9d,running'''
        SCREAMING_SNAKE_CASE_ = '''unwanted, running'''
        return input_text, output_text

    def _lowercase (self ):
        """Tokenize a sample string and check the tokens and their ids."""
        SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file )
        SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [7, 4, 5, 10, 8, 9] )

    def _lowercase (self ):
        """Intentionally empty override."""
        pass
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCamelCase ( __a ):
    """Coerce *__a* (tensor, PIL image, or list of either) into a batched NCHW
    float tensor scaled to [-1, 1].

    Tensors are returned unchanged; PIL images are resized down to a multiple
    of 8, stacked along the batch axis, normalized and converted to torch.

    NOTE(review): the original body read names (``image``, ``w``, ``h``) that
    were never bound — every assignment had been mangled to a placeholder
    name.  The bindings below restore the evident intent.
    """
    # Deprecation notice.  The second positional argument of warnings.warn is
    # the warning *category* — the original passed the image there, which
    # raises a TypeError for non-Warning values.
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    image = __a
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        # Round both sides down to an integer multiple of 8.
        w, h = (x - x % 8 for x in (w, h))
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        # uint8 [0, 255] -> float [0, 1]  (the original's `np.floataa` is not a
        # real numpy dtype; float32 is the evident intent given the /255 scale)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        image = 2.0 * image - 1.0  # [0, 1] -> [-1, 1]
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _lowerCamelCase ( __a ):
    """Coerce *__a* (tensor, PIL image, or list of either) into a batched
    binary mask tensor with values in {0, 1}.

    NOTE(review): the original body read names (``mask``, ``w``, ``h``) that
    were never bound, and two bare ``= 0`` / ``= 1`` assignments had lost
    their indexing targets.  The bindings below restore the evident intent;
    the 0.5 binarization threshold is the standard RePaint mask preprocessing
    — confirm against upstream diffusers.
    """
    mask = __a
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        # Round both sides down to an integer multiple of 32.
        w, h = (x - x % 32 for x in (w, h))
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # Binarize: anything below 0.5 is "unknown" (0), the rest "known" (1).
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class snake_case ( __lowercase ):
    """RePaint-style inpainting pipeline: iteratively denoises an image while
    re-injecting known pixels through a mask.

    NOTE(review): heavily machine-mangled — the base class ``__lowercase`` is
    unbound, both class attributes rebind the same name, and ``__call__``
    repeats the parameter name ``SCREAMING_SNAKE_CASE_`` for every argument
    (duplicate parameter names are a SyntaxError).  The body also reads names
    that are never bound (``image``, ``original_image``, ``mask_image``,
    ``batch_size``, ``eta``, ``t_last``, ``generator``, ``output_type``,
    ``return_dict``, ``_preprocess_image``, ``_preprocess_mask``).  The
    structure matches a diffusers RePaint pipeline; the real parameter names
    must be recovered before this can run.
    """

    UpperCAmelCase__ = 42
    UpperCAmelCase__ = 42

    def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        """Register the UNet and the RePaint scheduler on the pipeline."""
        super().__init__()
        self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )

    @torch.no_grad()
    def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2_50 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ):
        """Run the denoising loop and return the generated images."""
        # Preprocess both inputs into tensors on the pipeline device/dtype.
        SCREAMING_SNAKE_CASE_ = image
        SCREAMING_SNAKE_CASE_ = _preprocess_image(SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = original_image.to(device=self.device , dtype=self.unet.dtype )
        SCREAMING_SNAKE_CASE_ = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
        SCREAMING_SNAKE_CASE_ = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
        SCREAMING_SNAKE_CASE_ = original_image.shape
        SCREAMING_SNAKE_CASE_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
        SCREAMING_SNAKE_CASE_ = eta
        SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps[0] + 1
        SCREAMING_SNAKE_CASE_ = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                SCREAMING_SNAKE_CASE_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
                # compute previous image: x_t -> x_t-1
                SCREAMING_SNAKE_CASE_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                SCREAMING_SNAKE_CASE_ = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            SCREAMING_SNAKE_CASE_ = t
        # Map the result from [-1, 1] back to [0, 1] and NCHW -> NHWC numpy.
        SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
        SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints exercised by the TF tokenizer tests below.
# NOTE(review): the original bound both values to the same name `a`, so the
# second assignment clobbered the first, while the test class reads
# `TOKENIZER_CHECKPOINTS` (see its setUp).  Restore the list under the name
# the tests actually use; keep `a` bound to its final original value.
TOKENIZER_CHECKPOINTS = ["gpt2"]
a = "gpt2"
if is_tf_available():

    class __a ( tf.Module ):
        """Wraps a tokenizer plus a freshly-initialized TF GPT-2 LM behind a
        single serving function, for SavedModel export tests.

        NOTE(review): machine-mangled — every assignment targets the
        placeholder ``__SCREAMING_SNAKE_CASE`` while the bodies read
        never-bound names (``tokenizer``, ``snake_case_``, ``tokenized``,
        ``input_ids_dense``, ``outputs``).  The intent (tokenize -> dense ids
        -> attention mask -> logits) is clear, but the bindings must be
        restored before this can run.  The SavedModel test below refers to a
        ``ModelToSave`` class, which is presumably this one's original name.
        """

        def __init__( self : Tuple ,lowerCamelCase : Dict ):
            """Store the tokenizer and build an (untrained) LM from its config."""
            super().__init__()
            __SCREAMING_SNAKE_CASE = tokenizer
            __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(snake_case_ )
            __SCREAMING_SNAKE_CASE = TFGPTaLMHeadModel.from_config(snake_case_ )

        @tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name="""text""" ),) )
        def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[Any] ):
            """Serving function: batch of strings in, LM logits out."""
            __SCREAMING_SNAKE_CASE = self.tokenizer(snake_case_ )
            __SCREAMING_SNAKE_CASE = tokenized["""input_ids"""].to_tensor()
            # Mask: attend to every position with a nonzero token id.
            __SCREAMING_SNAKE_CASE = tf.cast(input_ids_dense > 0 ,tf.intaa )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            __SCREAMING_SNAKE_CASE = self.model(input_ids=snake_case_ ,attention_mask=snake_case_ )["""logits"""]
            return outputs
@require_tf
@require_keras_nlp
class __a ( unittest.TestCase ):
    """Checks that the in-graph TF GPT-2 tokenizer matches the Python
    tokenizer and survives tf.function compilation, SavedModel export, and
    get_config/from_config round-trips.

    NOTE(review): machine-mangled — all six methods share the name
    ``UpperCAmelCase__`` (only the last survives on the class); assignments
    target ``__SCREAMING_SNAKE_CASE`` while later statements read never-bound
    names (``snake_case_``, ``python_outputs``, ``tf_outputs``, ``model``,
    ``out``, ...); ``self.tokenizers`` / ``self.tf_tokenizers`` /
    ``self.test_sentences`` are read but never assigned; and
    ``TOKENIZER_CHECKPOINTS`` / ``ModelToSave`` must exist as module names —
    verify both.  Restore the bindings before running.
    """

    def UpperCAmelCase__ ( self : Optional[int] ):
        """Load paired slow/TF tokenizers and the shared test sentences."""
        super().setUp()
        __SCREAMING_SNAKE_CASE = [GPTaTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        __SCREAMING_SNAKE_CASE = [TFGPTaTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        __SCREAMING_SNAKE_CASE = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        __SCREAMING_SNAKE_CASE = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )

    def UpperCAmelCase__ ( self : List[str] ):
        """Python vs TF tokenizer outputs must match on every test sentence."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                __SCREAMING_SNAKE_CASE = tokenizer([test_inputs] ,return_tensors="""tf""" )
                __SCREAMING_SNAKE_CASE = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    __SCREAMING_SNAKE_CASE = python_outputs[key].numpy()
                    __SCREAMING_SNAKE_CASE = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(snake_case_ ,tf.intaa ) == tf_outputs_values ) )

    @slow
    def UpperCAmelCase__ ( self : List[Any] ):
        """tf.function-compiled tokenizer must agree with eager execution."""
        for tf_tokenizer in self.tf_tokenizers:
            __SCREAMING_SNAKE_CASE = tf.function(snake_case_ )
            for test_inputs in self.test_sentences:
                __SCREAMING_SNAKE_CASE = tf.constant(snake_case_ )
                __SCREAMING_SNAKE_CASE = compiled_tokenizer(snake_case_ )
                __SCREAMING_SNAKE_CASE = tf_tokenizer(snake_case_ )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Tokenizer output must survive a SavedModel export/reload unchanged."""
        for tf_tokenizer in self.tf_tokenizers:
            __SCREAMING_SNAKE_CASE = ModelToSave(tokenizer=snake_case_ )
            __SCREAMING_SNAKE_CASE = tf.convert_to_tensor([self.test_sentences[0]] )
            __SCREAMING_SNAKE_CASE = model.serving(snake_case_ )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                __SCREAMING_SNAKE_CASE = Path(snake_case_ ) / """saved.model"""
                tf.saved_model.save(snake_case_ ,snake_case_ ,signatures={"""serving_default""": model.serving} )
                __SCREAMING_SNAKE_CASE = tf.saved_model.load(snake_case_ )
                __SCREAMING_SNAKE_CASE = loaded_model.signatures["""serving_default"""](snake_case_ )["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def UpperCAmelCase__ ( self : Optional[Any] ):
        """get_config / from_config round-trip must preserve tokenization."""
        for tf_tokenizer in self.tf_tokenizers:
            __SCREAMING_SNAKE_CASE = tf.convert_to_tensor([self.test_sentences[0]] )
            __SCREAMING_SNAKE_CASE = tf_tokenizer(snake_case_ )  # Build model with some sample inputs
            __SCREAMING_SNAKE_CASE = tf_tokenizer.get_config()
            __SCREAMING_SNAKE_CASE = TFGPTaTokenizer.from_config(snake_case_ )
            __SCREAMING_SNAKE_CASE = model_from_config(snake_case_ )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def UpperCAmelCase__ ( self : Dict ):
        """max_length truncation must cap the id sequence length exactly."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            __SCREAMING_SNAKE_CASE = 12_3123
            for max_length in [3, 5, 1024]:
                __SCREAMING_SNAKE_CASE = tf.convert_to_tensor([self.test_sentences[0]] )
                __SCREAMING_SNAKE_CASE = tf_tokenizer(snake_case_ ,max_length=snake_case_ )
                __SCREAMING_SNAKE_CASE = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
| 109 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
UpperCamelCase_ = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def _UpperCAmelCase ( _lowerCamelCase : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """Yield ``(job_title, company_name)`` pairs scraped from Indeed for the
    given location.

    NOTE(review): the original body read the unbound names ``url`` and
    ``location``; ``UpperCamelCase_`` is the module's only URL constant and
    the parameter is plainly the location, so those references are restored
    here.
    """
    location = _lowerCamelCase
    soup = BeautifulSoup(requests.get(UpperCamelCase_ + location ).content , """html.parser""" )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
        job_title = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
        company_name = job.find("""span""" , {"""class""": """company"""} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
    # NOTE(review): the module defines no `fetch_jobs`; `_UpperCAmelCase` is
    # the only job-fetching generator here, so call it instead of the unbound
    # name.  Enumeration starts at 1 for human-friendly numbering.
    for i, job in enumerate(_UpperCAmelCase("""Bangalore"""), 1):
        print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 384 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase_ : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCamelCase_ :
    """Builds a tiny Pegasus config plus inputs and checks Flax cache-based
    incremental decoding against full decoding.

    NOTE(review): machine-mangled — ``__init__`` and both ``check_use_cache``
    methods repeat a single parameter name (``lowerCAmelCase_``), which is a
    SyntaxError, the three class attributes all rebind ``__magic_name__``, and
    bodies read names (``input_ids``, ``config``, ``model``, ...) that are
    only ever assigned to the placeholder ``UpperCAmelCase_``.  The test class
    below instantiates this helper as ``FlaxPegasusModelTester``, which is
    presumably this class's original name.  Restore the identifiers before
    running.
    """

    __magic_name__ = PegasusConfig
    __magic_name__ = {}
    __magic_name__ = '''gelu'''

    def __init__( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : List[str]=37 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Dict=20 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : Optional[int]=0 , ) -> Optional[int]:
        """Record the (tiny) model hyper-parameters on the tester."""
        UpperCAmelCase_ : List[str] = parent
        UpperCAmelCase_ : Optional[int] = batch_size
        UpperCAmelCase_ : Union[str, Any] = seq_length
        UpperCAmelCase_ : Union[str, Any] = is_training
        UpperCAmelCase_ : Optional[Any] = use_labels
        UpperCAmelCase_ : List[Any] = vocab_size
        UpperCAmelCase_ : Tuple = hidden_size
        UpperCAmelCase_ : str = num_hidden_layers
        UpperCAmelCase_ : Tuple = num_attention_heads
        UpperCAmelCase_ : Tuple = intermediate_size
        UpperCAmelCase_ : Any = hidden_dropout_prob
        UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
        UpperCAmelCase_ : Optional[int] = max_position_embeddings
        UpperCAmelCase_ : Any = eos_token_id
        UpperCAmelCase_ : Dict = pad_token_id
        UpperCAmelCase_ : Optional[Any] = bos_token_id

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
        """Build a random-token config/inputs pair ending each row with EOS."""
        UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        UpperCAmelCase_ : Optional[int] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        UpperCAmelCase_ : int = np.concatenate([input_ids, eos_tensor] , axis=1 )
        UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase_ : List[str] = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        UpperCAmelCase_ : List[Any] = prepare_pegasus_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        return config, inputs_dict

    def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str ) -> int:
        """Cached incremental decode must match a full forward pass."""
        UpperCAmelCase_ : List[Any] = 20
        UpperCAmelCase_ : Tuple = model_class_name(lowerCAmelCase_ )
        UpperCAmelCase_ : str = model.encode(inputs_dict["input_ids"] )
        UpperCAmelCase_ : Any = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        UpperCAmelCase_ : Tuple = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_ )
        UpperCAmelCase_ : List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        UpperCAmelCase_ : Any = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        # Decode all-but-last tokens with the cache, then the last token alone.
        UpperCAmelCase_ : Dict = model.decode(
            decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
        UpperCAmelCase_ : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        UpperCAmelCase_ : List[Any] = model.decode(
            decoder_input_ids[:, -1:] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase_ , )
        UpperCAmelCase_ : List[str] = model.decode(lowerCAmelCase_ , lowerCAmelCase_ )
        UpperCAmelCase_ : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )

    def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] ) -> Dict:
        """Same cached-vs-full check, but with an explicit attention mask."""
        UpperCAmelCase_ : Tuple = 20
        UpperCAmelCase_ : Optional[int] = model_class_name(lowerCAmelCase_ )
        UpperCAmelCase_ : Union[str, Any] = model.encode(inputs_dict["input_ids"] )
        UpperCAmelCase_ : Union[str, Any] = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        # Pad the decoder mask with zeros out to the max decode length.
        UpperCAmelCase_ : Optional[Any] = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        UpperCAmelCase_ : List[str] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase_ , lowerCAmelCase_ )
        UpperCAmelCase_ : Optional[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase_ : Tuple = model.decode(
            decoder_input_ids[:, :-1] , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
        UpperCAmelCase_ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        UpperCAmelCase_ : int = model.decode(
            decoder_input_ids[:, -1:] , lowerCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase_ , decoder_position_ids=lowerCAmelCase_ , )
        UpperCAmelCase_ : Any = model.decode(lowerCAmelCase_ , lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ )
        UpperCAmelCase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    """Build the Pegasus model input dict, deriving attention masks from the
    pad token when they are not supplied.

    NOTE(review): the original def repeated the mangled parameter name ``A__``
    five times (a SyntaxError) under the name ``snake_case``; the parameter
    names above are exactly the ones the body reads, and the function name is
    the one the tester's config-preparation method already calls with
    ``(config, input_ids, decoder_input_ids)``.
    """
    if attention_mask is None:
        # Attend to every non-pad encoder token.
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        # First decoder position is always attended; the rest mask out padding.
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class UpperCamelCase_ (__A , unittest.TestCase ):
    """Flax Pegasus model tests: config smoke tests, cached decoding, JIT
    parity of encode/decode, pretrained loading, and an XSum generation
    integration check.

    NOTE(review): machine-mangled — this class rebinds ``UpperCamelCase_``
    (shadowing the tester class above), the base ``__A`` is unbound, all nine
    methods share the name ``_SCREAMING_SNAKE_CASE`` (only the last survives),
    the inner ``encode_jitted``/``decode_jitted`` defs repeat a parameter name
    (a SyntaxError), and bodies read names that are only ever assigned to the
    placeholder ``UpperCAmelCase_``.  ``FlaxPegasusModelTester`` is also not
    defined under that name here.  Restore the identifiers before trusting
    these tests.
    """

    __magic_name__ = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    __magic_name__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    __magic_name__ = True
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False

    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        """Create the model tester and the config tester."""
        UpperCAmelCase_ : Optional[int] = FlaxPegasusModelTester(self )
        UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        """Cached decoding must match full decoding for every model class."""
        UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
        """Same, with an explicit attention mask."""
        UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
        """JIT-compiled encode must produce the same shapes as eager encode."""
        UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase_ : int = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
                UpperCAmelCase_ : Tuple = model_class(lowerCAmelCase_ )

                @jax.jit
                def encode_jitted(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : str ):
                    return model.encode(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )

                with self.subTest("JIT Enabled" ):
                    UpperCAmelCase_ : List[str] = encode_jitted(**lowerCAmelCase_ ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        UpperCAmelCase_ : Dict = encode_jitted(**lowerCAmelCase_ ).to_tuple()
                self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
                for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
        """JIT-compiled decode must produce the same shapes as eager decode."""
        UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase_ : str = model_class(lowerCAmelCase_ )
                UpperCAmelCase_ : Union[str, Any] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                UpperCAmelCase_ : Dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] ):
                    return model.decode(
                        decoder_input_ids=lowerCAmelCase_ , decoder_attention_mask=lowerCAmelCase_ , encoder_outputs=lowerCAmelCase_ , )

                with self.subTest("JIT Enabled" ):
                    UpperCAmelCase_ : Optional[Any] = decode_jitted(**lowerCAmelCase_ ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        UpperCAmelCase_ : int = decode_jitted(**lowerCAmelCase_ ).to_tuple()
                self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
                for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
        """Every model class must load google/pegasus-large and run a forward."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase_ : Any = model_class_name.from_pretrained("google/pegasus-large" , from_pt=lowerCAmelCase_ )
            UpperCAmelCase_ : Dict = np.ones((1, 1) )
            UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ )
            self.assertIsNotNone(lowerCAmelCase_ )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        """Integration: beam-search summaries on XSum must match references."""
        UpperCAmelCase_ : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
        UpperCAmelCase_ : List[Any] = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
        UpperCAmelCase_ : Tuple = [
            " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
            " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
        ]
        UpperCAmelCase_ : List[str] = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]
        UpperCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase_ , return_tensors="np" , truncation=lowerCAmelCase_ , max_length=512 , padding=lowerCAmelCase_ )
        UpperCAmelCase_ : Dict = model.generate(**lowerCAmelCase_ , num_beams=2 ).sequences
        UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
        assert tgt_text == decoded
| 701 |
"""simple docstring"""
# Bacon-cipher letter -> 5-symbol codeword table.
# NOTE(review): the original bound both this table and its reverse to the same
# mangled name ``lowerCamelCase_`` while the functions below read
# ``encode_dict``/``decode_dict`` (so importing the module raised NameError at
# the reverse-table comprehension).  The names the code actually uses are
# restored here; ``lowerCamelCase_`` stays bound to the reverse table, which
# is what the original's final assignment computed.
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
lowerCamelCase_ = decode_dict


def snake_case ( A__ ):
    """Encode *A__* (letters and spaces, case-insensitive) as a Bacon-cipher
    string of 'A'/'B' codewords; spaces are preserved.

    Raises ``Exception`` for any other character.
    """
    encoded = ""
    for letter in A__.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded


# Keep a stable handle on the encoder: the original module defined both the
# encoder and the decoder under the same name ``snake_case`` (the second def
# shadows the first), so ``snake_case`` ends up bound to the decoder below.
encode = snake_case


def snake_case ( A__ ):
    """Decode a Bacon-cipher string (5-symbol 'A'/'B' groups; spaces separate
    words).

    Raises ``Exception`` if the input contains anything but 'A', 'B' or space.
    """
    if set(A__ ) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in A__.split():
        # Consume the codeword five symbols at a time.
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


decode = snake_case
if __name__ == "__main__":
    # Run any doctest examples defined in this module's docstrings.
    from doctest import testmod
    testmod()
| 463 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __magic_name__ ,unittest.TestCase ):
    """LED tokenizer tests covering both the slow and the fast implementation.

    NOTE(review): every method is named ``_snake_case`` (later definitions
    shadow earlier ones, so unittest cannot discover most of them) and every
    local is bound to ``_lowerCAmelCase``; ``setUp`` also reads
    ``self.vocab_file`` / ``self.merges_file`` without ever setting them.
    Verify against the upstream LED tokenization test before relying on this.
    """

    # Tokenizer classes under test; the flag enables the Rust ("fast") variant.
    __lowerCamelCase : Optional[int] = LEDTokenizer
    __lowerCamelCase : Union[str, Any] = LEDTokenizerFast
    __lowerCamelCase : int = True

    def _snake_case ( self ) -> Tuple:
        # Build a tiny BPE vocab/merges fixture on disk so tests run offline.
        super().setUp()
        _lowerCAmelCase = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        # token -> id table derived from the list above
        _lowerCAmelCase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
        _lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        _lowerCAmelCase = {"unk_token": "<unk>"}
        _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        _lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        # NOTE(review): the two paths above are bound to a local, yet
        # ``self.vocab_file`` / ``self.merges_file`` are opened below -- verify.
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(_lowerCAmelCase ) )

    def _snake_case ( self , **_lowerCAmelCase ) -> Optional[Any]:
        # Instantiate the slow tokenizer from the fixture directory.
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )

    def _snake_case ( self , **_lowerCAmelCase ) -> Any:
        # Instantiate the fast (Rust) tokenizer from the fixture directory.
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )

    def _snake_case ( self , _lowerCAmelCase ) -> Tuple:
        # Input/output pair used by the shared round-trip test.
        return "lower newer", "lower newer"

    @cached_property
    def _snake_case ( self ) -> Tuple:
        # Slow tokenizer loaded from the published LED checkpoint.
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )

    @cached_property
    def _snake_case ( self ) -> Tuple:
        # Fast tokenizer loaded from the published LED checkpoint.
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )

    @require_torch
    def _snake_case ( self ) -> Dict:
        # Batch encoding must produce the pinned ids and (2, 9) padded shapes.
        _lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        _lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase = tokenizer(_lowerCAmelCase , max_length=len(_lowerCAmelCase ) , padding=_lowerCAmelCase , return_tensors="pt" )
            self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            _lowerCAmelCase = batch.input_ids.tolist()[0]
            self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )

    @require_torch
    def _snake_case ( self ) -> str:
        # Plain encoding must expose input_ids/attention_mask but no label keys.
        _lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
            self.assertIn("input_ids" , _lowerCAmelCase )
            self.assertIn("attention_mask" , _lowerCAmelCase )
            self.assertNotIn("labels" , _lowerCAmelCase )
            self.assertNotIn("decoder_attention_mask" , _lowerCAmelCase )

    @require_torch
    def _snake_case ( self ) -> Any:
        # ``max_length`` padding of targets must produce width-32 input_ids.
        _lowerCAmelCase = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase = tokenizer(text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )

    @require_torch
    def _snake_case ( self ) -> Tuple:
        # Truncation of an over-long input must clamp to the model max (5122).
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"] , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
            self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )

    @require_torch
    def _snake_case ( self ) -> List[Any]:
        # Inputs and targets must both be wrapped in BOS ... EOS special tokens.
        _lowerCAmelCase = ["A long paragraph for summarization."]
        _lowerCAmelCase = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" )
            _lowerCAmelCase = tokenizer(text_target=_lowerCAmelCase , return_tensors="pt" )
            _lowerCAmelCase = inputs["input_ids"]
            _lowerCAmelCase = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def _snake_case ( self ) -> List[Any]:
        # ``pad`` must carry a user-supplied global_attention_mask through.
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            _lowerCAmelCase = ["Summary of the text.", "Another summary."]
            _lowerCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            _lowerCAmelCase = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase )
            _lowerCAmelCase = [[0] * len(_lowerCAmelCase ) for x in encoded_output["input_ids"]]
            _lowerCAmelCase = tokenizer.pad(_lowerCAmelCase )
            self.assertSequenceEqual(outputs["global_attention_mask"] , _lowerCAmelCase )

    def _snake_case ( self ) -> List[str]:
        # Intentionally skipped shared test.
        pass

    def _snake_case ( self ) -> List[str]:
        # Slow and fast tokenizers must agree on ids/masks around <mask>.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
                _lowerCAmelCase = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
                _lowerCAmelCase = "A, <mask> AllenNLP sentence."
                _lowerCAmelCase = tokenizer_r.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
                _lowerCAmelCase = tokenizer_p.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                _lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                _lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    _lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    _lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 18 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): both statements below bind the same obfuscated name, so the
# logger object is immediately shadowed by the URL map -- verify intent.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# Checkpoint name -> config-file URL.
# NOTE(review): the key names the *distilled* checkpoint while the URL points
# at the plain ``deit-base-patch16-224`` config -- confirm the URL.
_SCREAMING_SNAKE_CASE = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( __magic_name__ ):
    """DeiT model configuration.

    Defaults reproduce the ``deit-base-patch16-224`` architecture.

    Fix(review): the obfuscated original declared every ``__init__`` parameter
    with the same name (a SyntaxError) and bound the attributes to an unusable
    local; parameter/attribute names are restored from the right-hand sides of
    the original assignments.
    """

    # Identifier consumed by the auto-config machinery (was bound to an
    # obfuscated class attribute in the original).
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Stride used when reconstructing the image in masked-image modeling.
        self.encoder_stride = encoder_stride
class lowerCAmelCase_ ( __magic_name__ ):
    """ONNX export configuration: declares the model inputs and the
    validation tolerance for the exported graph."""

    # Presumably the minimum export-tooling version this config targets --
    # confirm against the base ONNX config class.
    __lowerCamelCase : List[str] = version.parse("1.11" )

    @property
    def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        # Single image input; each axis is named so it can be made dynamic.
        axis_names = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", axis_names)] )

    @property
    def _snake_case ( self ) -> float:
        # Absolute tolerance used when validating exported-model outputs.
        return 1e-4
| 18 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Integration tests pinning ``JukeboxTokenizer`` outputs for two checkpoints.

    Fix(review): reconstructed from an obfuscated copy in which both class
    attributes and both test methods shared one name (each shadowing the
    previous), the locals referenced below (``tokenizer``/``tokens``/
    ``EXPECTED_OUTPUT``) were never bound, the ``Dict``/``Tuple`` return
    annotations referenced undefined names, and stray ``| 711`` residue was
    fused to the final line.  Verify against the upstream Jukebox test.
    """

    tokenizer_class = JukeboxTokenizer
    # Metadata passed to every tokenizer call below (read via ``self.metas``).
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
    }

    @require_torch
    def test_1b_lyrics_tokenizer(self):
        """Tokenizing the fixture metadata must reproduce the pinned 1b-lyrics ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
        tokens = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]] ),
            torch.tensor([[0, 0, 0, 10_69, 11]] ),
            torch.tensor([[0, 0, 0, 10_69, 11]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        """Tokenizing the fixture metadata must reproduce the pinned 5b-lyrics ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
        tokens = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]] ),
            torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
@require_torch
class lowerCamelCase_ :
    """Shared test mixin for diffusers UNet blocks.

    Subclasses set ``block_class`` and ``block_type`` ('down' | 'mid' | 'up');
    this mixin builds deterministic dummy inputs and checks output
    shape/values plus trainability.

    Fix(review): reconstructed from an obfuscated copy in which every method
    shared one name, ``get_dummy_input`` declared duplicate parameter names
    (a SyntaxError), tuple-unpack targets carried illegal annotations, most
    assignment targets were lost, and stray ``| 17 | 0 |`` residue was fused
    to the last line.  Verify against the upstream ``UNetBlockTesterMixin``.
    """

    @property
    def dummy_input(self):
        # Read by prepare_init_args_and_inputs_for_common() below.
        return self.get_dummy_input()

    @property
    def output_shape(self):
        # Expected (batch, channels, height, width) per block type.
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Build a reproducible input dict with the optional extra tensors."""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        # Seeded generator keeps the random tensors reproducible across runs.
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )

        if include_res_hidden_states_tuple:
            # Separate seed so the residual states differ from hidden_states.
            generator_res = torch.manual_seed(1 )
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape , generator=generator_res , device=device ),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32) ).to(torch_device )

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        """Return (constructor kwargs, forward kwargs) for the block under test."""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            # Up blocks additionally need the previous block's channel count.
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            # Mid blocks infer their output channels.
            init_dict.pop("out_channels" )

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """Forward pass must match the expected shape and pinned output slice."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict )

        if isinstance(output , Tuple ):
            output = output[0]

        self.assertEqual(output.shape , self.output_shape )

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5e-3 )

    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
    def test_training(self):
        """Backward pass through an MSE loss must run without error."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )

        if isinstance(output , Tuple ):
            output = output[0]

        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
'''simple docstring'''
from string import ascii_uppercase
lowercase__ = {str(ord(c) - 5_5): c for c in ascii_uppercase}
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError('''int() can\'t convert non-string with explicit base''' )
if num < 0:
raise ValueError('''parameter must be positive int''' )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if base in (0, 1):
raise ValueError('''base must be >= 2''' )
if base > 36:
raise ValueError('''base must be <= 36''' )
snake_case : Optional[Any] = ''
snake_case : Tuple = 0
snake_case : Dict = 0
while div != 1:
snake_case : Optional[int] = divmod(lowerCamelCase__ , lowerCamelCase__ )
if base >= 11 and 9 < mod < 36:
snake_case : Any = ALPHABET_VALUES[str(lowerCamelCase__ )]
else:
snake_case : List[str] = str(lowerCamelCase__ )
new_value += actual_value
snake_case : Any = num // base
snake_case : Any = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(lowerCamelCase__ )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 638 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a = logging.getLogger()
# Alias used by the test class below (the original bound the logger only to
# the obfuscated name ``a`` while the class referenced ``logger``).
logger = a


@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A_ ( unittest.TestCase ):
    """Runs the library's embedded doctests over the source and docs trees.

    Fix(review): reconstructed from an obfuscated copy in which every method
    and every parameter/local shared one name (duplicate parameters are a
    SyntaxError); names were restored from the in-body references and the
    ``self.analyze_directory`` call sites.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every file in *directory* that survives the name filters.

        Args:
            directory: directory whose immediate files are scanned.
            identifier: keep only files whose name contains this substring.
            ignore_files: file names to skip (``__init__.py`` is always skipped).
            n_identifier: substring(s); files containing any of them are dropped.
            only_modules: if True, doctest the importable transformers module;
                otherwise run ``doctest.testfile`` on the file itself.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory , file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier , list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        # Fix(review): copy before appending so the caller's list is not mutated.
        ignore_files = list(ignore_files or [])
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing' , file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers , module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures) , 0)
                except AttributeError:
                    # Not every file maps to an importable transformers module.
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed , 0)

    def test_modeling_doctests(self):
        # CTRL modeling files are excluded from the doctest run.
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files)

    def test_tokenization_doctests(self):
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory , identifier=identifier)

    def test_configuration_doctests(self):
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory , identifier=identifier)

    def test_remaining_source_doctests(self):
        # Everything that is not a configuration/modeling/tokenization file.
        directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory , n_identifier=n_identifiers)

    def test_documentation_doctests(self):
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        # Documentation files are doctested as text files, not as modules.
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False)
| 652 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; the configuration classes below log through ``logger``.
# Fix(review): the original bound both statements to one obfuscated name, so
# the logger object was immediately shadowed and ``logger`` was undefined.
logger = logging.get_logger(__name__)

# Checkpoint name -> config-file URL.
lowerCAmelCase : str = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class _A ( __magic_name__):
    """Vision-encoder configuration for InstructBLIP (ViT-g/14 defaults).

    Fix(review): the obfuscated original declared every parameter with the
    same name (a SyntaxError); names were restored from the right-hand sides
    of the attribute assignments and the upstream defaults.
    """

    model_type = '''instructblip_vision_model'''

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this vision config, unwrapping a composite InstructBLIP config."""
        cls._set_token_in_kwargs(kwargs )

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )

        return cls.from_dict(config_dict , **kwargs )
class _A ( __magic_name__):
    """Q-Former configuration for InstructBLIP (BERT-base-like defaults).

    Fix(review): parameter names restored from the attribute assignments; the
    obfuscated original repeated one name for every parameter (a SyntaxError).
    """

    model_type = '''instructblip_qformer'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Cross-attention to the vision encoder is inserted every N layers.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this Q-Former config, unwrapping a composite InstructBLIP config."""
        cls._set_token_in_kwargs(kwargs )

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['qformer_config']

        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )

        return cls.from_dict(config_dict , **kwargs )
class _A ( __magic_name__):
    """Composite InstructBLIP configuration (vision + Q-Former + language model).

    Fix(review): restored distinct parameter/attribute/method names; the
    obfuscated original repeated one parameter name (a SyntaxError), bound
    ``model_type`` and ``is_composition`` to a single colliding attribute, and
    gave the classmethod and ``to_dict`` the same name.
    NOTE(review): ``InstructBlipVisionConfig`` / ``InstructBlipQFormerConfig``
    below match the upstream class names, but this file binds those classes as
    ``_A`` -- verify the bindings.
    """

    model_type = '''instructblip'''
    is_composition = True

    def __init__(self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )

        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends to the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls , vision_config , qformer_config , text_config , **kwargs ):
        """Build a composite config from already-constructed sub-configs."""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 353 |
from __future__ import annotations
import math
# Script metadata (version and authors).
# NOTE(review): both constants are bound to the same obfuscated name, so the
# version string is immediately shadowed by the author string -- verify intent.
lowerCAmelCase : List[str] = '2020.9.26'
lowerCAmelCase : List[Any] = 'xcodz-dot, cclaus, dhruvmanila'
def A_ ( x , y , z , scale , distance ):
    """Project a 3-D point onto a 2-D plane with a simple perspective divide.

    Fix(review): the original declared all five parameters as ``a`` (a
    SyntaxError); names are restored from the formula in the body.

    Args:
        x, y, z: coordinates of the point.
        scale: multiplier applied to the projected coordinates.
        distance: distance of the projection plane from the origin.

    Returns:
        Tuple ``(projected_x, projected_y)``.

    Raises:
        TypeError: if any argument is not a float or an int.
    """
    # locals() here still contains only the five parameters.
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f"Input values must either be float or int: {list(locals().values() )}"
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


# The __main__ block below calls this function as ``convert_to_ad``.
convert_to_ad = A_
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """
    Rotate the point (x, y, z) about the given `axis` ('x', 'y' or 'z') by `angle`.

    Bug fix: the obfuscated signature declared the parameter `a` five times
    (a SyntaxError) while the body referenced x/y/z/axis/angle; restored.

    NOTE(review): the angle conversion `(angle % 360) / 450 * 180 / pi` is kept
    verbatim from the original even though a plain degrees->radians conversion
    would divide by 180 — changing it would change results for all callers.
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values() )}"
        )
        raise TypeError(msg)
    angle = (angle % 3_6_0) / 4_5_0 * 1_8_0 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'')
    return new_x, new_y, new_z
if __name__ == "__main__":
    # Run the module doctests, then smoke-print both helpers using the
    # f-string "self-documenting expression" format (`{expr = }`).
    import doctest

    doctest.testmod()
    print(F'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }')
    print(F'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
# ---------- chunk separator (non-code dataset artifact) ----------
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
# Module-level logger; bound to the name `logger` because the functions below
# (get_scores, main, ...) reference `logger`, not `_lowercase`.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Guess the eval model type ("rag_token", "rag_sequence" or "bart") from a
    checkpoint path/name; return None when it cannot be inferred.

    Restored to the name used by the caller in `main` (obfuscation had bound it
    to a throwaway identifier).
    """
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best `metric_fn(prediction, gt)` score over all ground truths.

    Bug fix: the obfuscated signature repeated one parameter name three times
    (a SyntaxError) and the generator referenced an undefined `ground_truths`.
    """
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """Compute exact-match and F1 of predictions against gold answers and log them.

    Bug fixes: `em` and `fa` accumulators were never initialized (NameError on
    the first `+=`), and `pd.read_csv` lost its `header=None` (the gold file
    has no header row; column 1 holds the answer lists).
    """
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        # gold file format: question \t python-literal list of acceptable answers
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        # one expected answer string per line
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f'F1: {fa:.2f}')
    logger.info(f'EM: {em:.2f}')
def get_precision_at_k(args, preds_path, gold_data_path):
    """Log precision@k of retrieved provenance (tab-separated doc titles)
    against the gold provenance file.

    Bug fix: the `em` accumulator was never initialized (NameError on `+=`).
    """
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        # only the top-k hypothesised docs count toward precision@k
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f'Precision@{k}: {em: .2f}')
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve documents for a batch of questions; return one tab-joined string
    of (de-quoted) document titles per question.

    Bug fix: the inner helper's parameter was mangled while its body read
    `title` — restored. `torch.floataa` is restored to `torch.float32`
    (the retriever index expects fp32 numpy embeddings).
    """

    def strip_title(title):
        # Titles in the index may be wrapped in literal double quotes.
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
    """Generate one answer string per question with the RAG/BART model.

    Restored to the name referenced by `main` (`evaluate_batch_eae`); the
    obfuscated locals shadowed each other under a single identifier.
    """
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # forbid repeated BOS tokens
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info('Q: {} - A: {}'.format(q, a))

        return answers
def get_args():
    """Parse command-line arguments for RAG evaluation and resolve the device.

    Bug fixes: the parser was bound to a throwaway name while every
    `add_argument` call referenced `parser`; the mangled `type=`/`default=`
    placeholders are restored to `str`/`int`/`None` per argument semantics.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=5_0, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    # Prefer GPU when available; the device is consumed by the evaluate_* helpers.
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    """Run e2e or retrieval evaluation over one or more checkpoints.

    Restores the mangled locals (`model_kwargs`, `model_class`, `checkpoints`,
    `score_fn`, `evaluate_batch_fn`, ...) whose single shared obfuscated name
    made the function incoherent (each assignment clobbered the previous one,
    and index kwargs were never stored into `model_kwargs`).
    """
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info('Evaluate the following checkpoints: %s', checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            # Reuse an existing predictions file instead of regenerating it.
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info('***** Running evaluation for {} *****'.format(checkpoint))
        logger.info(' Batch size = %d', args.eval_batch_size)
        logger.info(' Predictions will be stored under {}'.format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            # Flush the final partial batch.
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    # Bug fix: the parsed namespace was bound to `_lowercase` while `main`
    # was called with the undefined name `args`.
    args = get_args()
    main(args)
# ---------- chunk separator (non-code dataset artifact) ----------
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Sentencepiece word-boundary marker and the test fixture model path.
# Bug fix: both constants were bound to the same mangled name `_lowercase`
# (the second overwrote the first) while the test class below references
# SPIECE_UNDERLINE and SAMPLE_VOCAB.
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit and integration tests for the BigBird slow and fast tokenizers.

    Restoration notes: the obfuscated version inherited from the undefined name
    `A`, gave every test method the same name (so only the last survived), and
    referenced the undefined `__A` wherever an argument or constant belonged.
    All runtime literals (token lists, ids, texts) are preserved verbatim.
    """

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Persist a fixture-based tokenizer so TokenizerTesterMixin helpers
        # can reload it from self.tmpdirname.
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<s>` must round-trip through token<->id conversion as id 1."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
# ---------- chunk separator (non-code dataset artifact) ----------
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
# Bound to `logger` because default_hp_search_backend below references `logger`.
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    """Descriptor/base class for one hyperparameter-search backend.

    Bug fix: the two class attributes were both annotated under one mangled
    name while the methods read `self.name` and `cls.pip_package`; they are
    restored so ensure_available()/pip_install() can actually resolve them.
    """

    name: str  # backend identifier, e.g. "optuna"; set by subclasses
    pip_package: str = None  # pip package name when it differs from `name`

    @staticmethod
    def is_available():
        """Return True when the backend's package is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        """Execute the hyperparameter search for `trainer`."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default search space for this backend."""
        raise NotImplementedError

    def ensure_available(self):
        # Fail fast with an actionable install hint.
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    """Optuna-based hyperparameter search (restored name: the registry dict
    below references `OptunaBackend`, and the obfuscated base `snake_case__`
    was undefined)."""

    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    """Ray Tune-based hyperparameter search."""

    name = "ray"
    pip_package = "'ray[tune]'"  # the tune extra is required, hence the quoting

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    """SigOpt-based hyperparameter search."""

    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    """Weights & Biases sweeps-based hyperparameter search."""

    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
# Registry mapping HPSearchBackend enum values to their backend classes.
# Restored name: default_hp_search_backend references this identifier.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    """Return the name of the first installed hp-search backend.

    Logs an informational message when several backends are available; raises
    RuntimeError with install hints when none is.

    Bug fix: the obfuscated version bound both `available_backends` and `name`
    to one identifier and then referenced the undefined originals.
    """
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
# ---------- chunk separator (non-code dataset artifact) ----------
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    # Bug fixes: `parser`, `args` and `pipe` were bound to one mangled name
    # while the code below referenced the originals (NameError), and
    # `torch.floataa` does not exist — `--half` means fp16.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
# ---------- chunk separator (non-code dataset artifact) ----------
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase__ (_UpperCAmelCase):
return getitem, k
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return setitem, k, v
def lowerCamelCase__ (_UpperCAmelCase):
return delitem, k
def _run_operation(obj, fun, *args):
    """Apply ``fun(obj, *args)`` and return ``(result, exception)``.

    Exactly one element of the pair is non-None: the call's return value on
    success, or the raised exception on failure.  The test below references
    ``_run_operation``; the original def name was scrambled and its body read
    the unbound name ``fun``.
    """
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
# Operation sequences consumed by the parametrized test below.  The
# parametrize decorator references these exact names; previously every
# sequence was bound to the same throwaway name `a_`.
_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)

_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]

_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]

_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
    'operations' , (
        pytest.param(_add_items , id='add items'),
        pytest.param(_overwrite_items , id='overwrite items'),
        pytest.param(_delete_items , id='delete items'),
        pytest.param(_access_absent_items , id='access absent items'),
        pytest.param(_add_with_resize_up , id='add with resize up'),
        pytest.param(_add_with_resize_down , id='add with resize down'),
    ) , )
def test_hash_map_is_the_same_as_dict(operations):
    """Run the same operation sequence on HashMap and a plain dict and check
    that results, string form, keys, length and items agree after each step.

    The parameter must be named ``operations`` to match the parametrize id;
    the scrambled version used a mismatched name and never bound the two
    containers it compared.
    """
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_implemented():
    """Ensure HashMap exposes no public names beyond those of a plain dict.

    Renamed so pytest collects it; the nested predicate previously read an
    unbound ``name`` and the two comprehension results were discarded into a
    single throwaway variable.
    """

    def is_public(name) -> bool:
        # A name is public when it does not start with an underscore.
        return not name.startswith('_')

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class UpperCAmelCase__ ( snake_case__ , snake_case__ ):
    """Configuration class for a BiT ("bit") backbone model.

    NOTE(review): class/base identifiers look machine-mangled — presumably
    ``BitConfig(BackboneConfigMixin, PretrainedConfig)``; confirm upstream.
    """

    # NOTE(review): the next three assignments rebind the same class attribute
    # `snake_case_`, so only the last survives; presumably these should be
    # model_type / layer_types / supported_padding — confirm upstream.
    snake_case_ = '''bit'''
    snake_case_ = ['''preactivation''', '''bottleneck''']
    snake_case_ = ['''SAME''', '''VALID''']

    # NOTE(review): every parameter is named `A__` (duplicate argument names
    # are a SyntaxError) while the body reads `layer_type`, `global_padding`,
    # `num_channels`, ... that are never bound; the parameter list appears to
    # have lost its real names — confirm upstream.
    def __init__( self , A__=3 , A__=64 , A__=[256, 512, 1024, 2048] , A__=[3, 4, 6, 3] , A__="preactivation" , A__="relu" , A__=None , A__=32 , A__=0.0 , A__=False , A__=32 , A__=1 , A__=None , A__=None , **A__ , ):
        """Initialize the configuration, validating layer type and padding."""
        super().__init__(**A__ )
        # Reject unknown residual-layer flavours.
        if layer_type not in self.layer_types:
            raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        # Normalise the padding strategy to upper case; reject unsupported ones.
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                UpperCAmelCase_: str = global_padding.upper()
            else:
                raise ValueError(F"Padding strategy {global_padding} not supported" )
        # NOTE(review): the assignments below rebind the local `UpperCAmelCase_`
        # instead of setting `self.<attr>`, so no configuration value is stored.
        UpperCAmelCase_: List[Any] = num_channels
        UpperCAmelCase_: List[Any] = embedding_size
        UpperCAmelCase_: Union[str, Any] = hidden_sizes
        UpperCAmelCase_: int = depths
        UpperCAmelCase_: Tuple = layer_type
        UpperCAmelCase_: str = hidden_act
        UpperCAmelCase_: str = global_padding
        UpperCAmelCase_: Dict = num_groups
        UpperCAmelCase_: Dict = drop_path_rate
        UpperCAmelCase_: Tuple = embedding_dynamic_padding
        UpperCAmelCase_: Tuple = output_stride
        UpperCAmelCase_: Dict = width_factor
        # Stage names: "stem" followed by "stage1".."stageN".
        UpperCAmelCase_: str = ["stem"] + [F"stage{idx}" for idx in range(1 , len(A__ ) + 1 )]
        # NOTE(review): tuple-unpacking with an annotation is a SyntaxError and
        # the targets should presumably be self._out_features/_out_indices.
        UpperCAmelCase_ , UpperCAmelCase_: Tuple = get_aligned_output_features_output_indices(
            out_features=A__ , out_indices=A__ , stage_names=self.stage_names )
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# (tf substring, hf substring) rename tables used by rename_state_dict_key.
# The conversion functions below reference these exact names; previously all
# five tables were bound to the same throwaway name `__UpperCamelCase`.
INIT_COMMON = [
    # tf -> hf
    ('/', '.'),
    ('layer_', 'layers.'),
    ('kernel', 'weight'),
    ('beta', 'bias'),
    ('gamma', 'weight'),
    ('pegasus', 'model'),
]

END_COMMON = [
    ('.output.dense', '.fc2'),
    ('intermediate.LayerNorm', 'final_layer_norm'),
    ('intermediate.dense', 'fc1'),
]

# Decoder-specific renames, applied between the shared prefix/suffix tables.
DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.out_proj'),
        ('attention.self', 'self_attn'),
        ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
        ('attention.encdec_output.dense', 'encoder_attn.out_proj'),
        ('attention.encdec', 'encoder_attn'),
        ('key', 'k_proj'),
        ('value', 'v_proj'),
        ('query', 'q_proj'),
        ('decoder.LayerNorm', 'decoder.layernorm_embedding'),
    ]
    + END_COMMON
)

# Renames for all non-decoder weights (encoder + embeddings).
REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ('embeddings.word_embeddings', 'shared.weight'),
        ('embeddings.position_embeddings', 'embed_positions.weight'),
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.output'),
        ('attention.self', 'self_attn.self'),
        ('encoder.LayerNorm', 'encoder.layernorm_embedding'),
    ]
    + END_COMMON
)

# TF checkpoint keys that have no PyTorch counterpart and are skipped.
KEYS_TO_IGNORE = [
    'encdec/key/bias',
    'encdec/query/bias',
    'encdec/value/bias',
    'self/key/bias',
    'self/query/bias',
    'self/value/bias',
    'encdec_output/dense/bias',
    'attention/output/dense/bias',
]
def rename_state_dict_key(k: str, patterns) -> str:
    """Apply every (tf_name, hf_name) substring replacement in *patterns* to *k*.

    The scrambled version gave both parameters the same name and read the
    unbound names ``k``/``patterns``; callers invoke ``rename_state_dict_key``.
    """
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict):
    """Build a BigBirdPegasus model and load the converted TF weights into it.

    Decoder and non-decoder weights use different rename tables; weights whose
    key touches dense/query/key/value matrices are transposed to match the
    PyTorch layout.  Raises ValueError when a renamed key has no counterpart
    in the PyTorch state dict.  Returns the loaded model.
    """
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        # Position embeddings are handled separately below.
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # The single TF position-embedding table feeds both encoder and decoder.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path):
    """Load every variable of the TF checkpoint at *path* into a name->array dict.

    Variables whose name contains an ignored substring ("global_step") are
    skipped.  The scrambled version bound every intermediate to a throwaway
    name and never populated the result dict.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    """End-to-end conversion: read TF checkpoint, convert, save in HF format.

    Renamed to match the call in the ``__main__`` guard below; the scrambled
    version gave all three parameters the same name.
    """
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    # CLI entry point: previously `parser`, `args` and `config_update` were all
    # bound to the same throwaway name, so every reference was a NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 458 |
def odd_even_transposition(arr: list) -> list:
    """Sort *arr* in place (ascending) with odd-even transposition sort and return it.

    Alternates comparing even-indexed then odd-indexed adjacent pairs for
    ``len(arr)`` passes.  Fixes in the scrambled version: the outer loop
    iterated ``range(<list>)`` (TypeError) instead of ``range(arr_size)``, the
    parameter name did not match the body's ``arr``, and the swap discarded
    both values into throwaway locals instead of writing back to the list.
    """
    arr_size = len(arr)
    for _ in range(arr_size):
        # Even passes start at index 0, odd passes at index 1.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    # Demo run: the list must be bound to `arr`, the name the f-string reads.
    arr = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 458 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

# Resource tables referenced by the tokenizer class below; previously all five
# module constants were bound to the same throwaway name `UpperCAmelCase__`,
# leaving the names the class reads undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class A ( SCREAMING_SNAKE_CASE__ ):
    """Fast (tokenizers-backed) tokenizer for MobileBERT.

    NOTE(review): the class name and base look machine-mangled — presumably
    ``MobileBertTokenizerFast(PreTrainedTokenizerFast)``; confirm upstream.
    """

    snake_case__ :str = VOCAB_FILES_NAMES
    snake_case__ :List[Any] = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ :Optional[int] = PRETRAINED_INIT_CONFIGURATION
    snake_case__ :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ :Union[str, Any] = MobileBertTokenizer

    # NOTE(review): every keyword parameter is named `__magic_name__`
    # (duplicate argument names are a SyntaxError); the real names are
    # presumably vocab_file, tokenizer_file, do_lower_case, unk_token,
    # sep_token, pad_token, cls_token, mask_token, tokenize_chinese_chars,
    # strip_accents — confirm upstream.
    def __init__( self : List[Any] , __magic_name__ : str=None , __magic_name__ : List[Any]=None , __magic_name__ : List[Any]=True , __magic_name__ : Tuple="[UNK]" , __magic_name__ : int="[SEP]" , __magic_name__ : List[str]="[PAD]" , __magic_name__ : List[str]="[CLS]" , __magic_name__ : int="[MASK]" , __magic_name__ : Dict=True , __magic_name__ : Any=None , **__magic_name__ : List[str] , ):
        """Initialize the backend tokenizer and re-sync its normalizer options."""
        super().__init__(
            __magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
        # NOTE(review): the locals below are all rebound to `lowerCAmelCase__`
        # while later lines read `normalizer_state`, `normalizer_class`, ...;
        # the real flow rebuilds the normalizer when its saved options differ
        # from the requested ones — confirm upstream.
        lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , __magic_name__ ) != do_lower_case
            or normalizer_state.get("strip_accents" , __magic_name__ ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , __magic_name__ ) != tokenize_chinese_chars
        ):
            lowerCAmelCase__ = getattr(__magic_name__ , normalizer_state.pop("type" ) )
            lowerCAmelCase__ = do_lower_case
            lowerCAmelCase__ = strip_accents
            lowerCAmelCase__ = tokenize_chinese_chars
            lowerCAmelCase__ = normalizer_class(**__magic_name__ )
        lowerCAmelCase__ = do_lower_case

    def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any]=None ):
        """Build model inputs: [CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
        """Return token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        lowerCAmelCase__ = [self.sep_token_id]
        lowerCAmelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
        """Save the backend tokenizer's vocabulary files and return their paths."""
        lowerCAmelCase__ = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
        return tuple(__magic_name__ )
| 48 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Previously both module constants were bound to the same throwaway name,
# so the archive map clobbered the logger.
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''andreasmadsen/efficient_mlm_m0.40''': (
        '''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
    ),
}
class lowercase_ ( _UpperCamelCase ):
    """Configuration for the "roberta-prelayernorm" model type.

    NOTE(review): the class/base names look machine-mangled — presumably
    ``RobertaPreLayerNormConfig(PretrainedConfig)``; confirm upstream.
    """

    __lowerCAmelCase = "roberta-prelayernorm"

    # NOTE(review): every parameter is named `UpperCamelCase__` (duplicate
    # argument names are a SyntaxError) while the body reads `vocab_size`,
    # `hidden_size`, ... which are never bound; the parameter list appears to
    # have lost its real names — confirm upstream.
    def __init__( self : Optional[int], UpperCamelCase__ : List[Any]=5_02_65, UpperCamelCase__ : int=7_68, UpperCamelCase__ : str=12, UpperCamelCase__ : Union[str, Any]=12, UpperCamelCase__ : Tuple=30_72, UpperCamelCase__ : Tuple="gelu", UpperCamelCase__ : List[Any]=0.1, UpperCamelCase__ : Union[str, Any]=0.1, UpperCamelCase__ : Any=5_12, UpperCamelCase__ : Union[str, Any]=2, UpperCamelCase__ : Optional[Any]=0.02, UpperCamelCase__ : str=1e-12, UpperCamelCase__ : str=1, UpperCamelCase__ : List[str]=0, UpperCamelCase__ : Optional[int]=2, UpperCamelCase__ : Optional[Any]="absolute", UpperCamelCase__ : Optional[Any]=True, UpperCamelCase__ : List[str]=None, **UpperCamelCase__ : Any, ) -> Dict:
        """Initialize the configuration."""
        super().__init__(pad_token_id=UpperCamelCase__, bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, **UpperCamelCase__ )
        # NOTE(review): the assignments below rebind the local `_A` instead of
        # setting `self.<attr>`, so no configuration value is stored.
        _A = vocab_size
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = hidden_act
        _A = intermediate_size
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = max_position_embeddings
        _A = type_vocab_size
        _A = initializer_range
        _A = layer_norm_eps
        _A = position_embedding_type
        _A = use_cache
        _A = classifier_dropout
class lowercase_ ( _UpperCamelCase ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        """Map each input name to its dynamic axes.

        Fix: the axis dict was bound to the throwaway local `_A` while the
        return expression read the unbound name `dynamic_axis`.
        """
        # Multiple-choice inputs carry an extra "choice" dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 107 | 0 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Substring renames applied to every original SAM checkpoint key.
# replace_keys() below reads this exact name; the table was previously bound
# to the throwaway name `lowercase_`.
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}
def replace_keys(state_dict):
    """Rename original-SAM state-dict keys to the HF SamModel naming scheme.

    Drops the normalization buffers ("pixel_mean"/"pixel_std"), applies the
    substring renames from KEYS_TO_MODIFY_MAPPING, and remaps the
    output-hypernetwork MLP layer indices (0/1/2 -> proj_in/layers.0/proj_out).
    The scrambled version bound every intermediate to a throwaway local and
    never wrote into the result dict.
    """
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    # The prompt encoder shares its positional embedding with the model-level
    # shared embedding module.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def lowerCAmelCase (__A , __A , __A , __A="ybelkada/segment-anything"):
    """Convert an original SAM checkpoint to HF format and sanity-check it on GPU.

    NOTE(review): the def name and parameters look machine-mangled — the
    ``__main__`` guard calls ``convert_sam_checkpoint(model_name,
    pytorch_dump_folder, push_to_hub, model_hub_id)``; confirm upstream.
    NOTE(review): the locals below are all rebound to `_a` while later lines
    read `model_name`, `config`, `hf_model`, `processor`, `inputs`, `output`,
    `scores`, ... — the real names were lost in scrambling.
    """
    _a = hf_hub_download(snake_case_ , F'''checkpoints/{model_name}.pth''')
    # Pick the vision config matching the checkpoint size (ViT-B is the default SamConfig).
    if "sam_vit_b" in model_name:
        _a = SamConfig()
    elif "sam_vit_l" in model_name:
        _a = SamVisionConfig(
            hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        _a = SamConfig(
            vision_config=snake_case_ , )
    elif "sam_vit_h" in model_name:
        _a = SamVisionConfig(
            hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        _a = SamConfig(
            vision_config=snake_case_ , )

    # Load the original weights, rename them, and load into the HF model.
    _a = torch.load(snake_case_ , map_location='''cpu''')
    _a = replace_keys(snake_case_)
    _a = SamImageProcessor()
    _a = SamProcessor(image_processor=snake_case_)
    _a = SamModel(snake_case_)
    hf_model.load_state_dict(snake_case_)
    _a = hf_model.to('''cuda''')

    # Smoke test on a reference image with known IoU scores.
    _a = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
    _a = Image.open(requests.get(snake_case_ , stream=snake_case_).raw).convert('''RGB''')
    _a = [[[400, 650]]]
    _a = [[1]]
    _a = processor(images=np.array(snake_case_) , return_tensors='''pt''').to('''cuda''')
    with torch.no_grad():
        _a = hf_model(**snake_case_)
    _a = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        # Expected scores below were recorded for the ViT-H checkpoint.
        assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
        _a = processor(
            images=np.array(snake_case_) , input_points=snake_case_ , input_labels=snake_case_ , return_tensors='''pt''').to('''cuda''')
        with torch.no_grad():
            _a = hf_model(**snake_case_)
        _a = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
        # Box prompt.
        _a = ((75, 275, 1_725, 850),)
        _a = processor(images=np.array(snake_case_) , input_boxes=snake_case_ , return_tensors='''pt''').to('''cuda''')
        with torch.no_grad():
            _a = hf_model(**snake_case_)
        _a = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
        # Test with 2 points and 1 image.
        _a = [[[400, 650], [800, 650]]]
        _a = [[1, 1]]
        _a = processor(
            images=np.array(snake_case_) , input_points=snake_case_ , input_labels=snake_case_ , return_tensors='''pt''').to('''cuda''')
        with torch.no_grad():
            _a = hf_model(**snake_case_)
        _a = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
lowercase_ = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
lowercase_ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 710 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A ( A , unittest.TestCase ):
    '''Fast tests for the ONNX Stable Diffusion img2img pipeline on a tiny checkpoint.

    NOTE(review): every test method is named `a__` (each redefinition shadows
    the previous one, so only the last would be collected) and locals are all
    rebound to `_a` while later lines read `image`, `generator`, `pipe`,
    `inputs`, `image_slice`, `expected_slice`; the real names were lost in
    scrambling — confirm against the upstream test file.
    '''

    __lowerCamelCase : Dict = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'

    def a__ (self , A=0 ) -> str:
        """Build deterministic dummy inputs (seeded image + RNG) for the pipeline."""
        _a = floats_tensor((1, 3, 128, 128) , rng=random.Random(A ) )
        _a = np.random.RandomState(A )
        _a = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def a__ (self ) -> str:
        """Default scheduler: check output shape and a reference pixel slice."""
        _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=A )
        _a = self.get_dummy_inputs()
        _a = pipe(**A ).images
        _a = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        _a = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1

    def a__ (self ) -> Tuple:
        """PNDM scheduler (skip_prk_steps) variant of the reference-slice check."""
        _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        _a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A )
        pipe.set_progress_bar_config(disable=A )
        _a = self.get_dummy_inputs()
        _a = pipe(**A ).images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _a = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def a__ (self ) -> int:
        """LMS discrete scheduler variant (with a warmup pass to apply optimizations)."""
        _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        _a = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=A )
        # warmup pass to apply optimizations
        _a = pipe(**self.get_dummy_inputs() )
        _a = self.get_dummy_inputs()
        _a = pipe(**A ).images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _a = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def a__ (self ) -> Dict:
        """Euler discrete scheduler variant of the reference-slice check."""
        _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        _a = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=A )
        _a = self.get_dummy_inputs()
        _a = pipe(**A ).images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _a = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def a__ (self ) -> Tuple:
        """Euler ancestral scheduler variant of the reference-slice check."""
        _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        _a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=A )
        _a = self.get_dummy_inputs()
        _a = pipe(**A ).images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _a = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def a__ (self ) -> List[str]:
        """DPM-Solver multistep scheduler variant of the reference-slice check."""
        _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        _a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=A )
        _a = self.get_dummy_inputs()
        _a = pipe(**A ).images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        _a = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
    '''Nightly GPU integration tests for the ONNX Stable Diffusion img2img pipeline.

    NOTE(review): all method names are `a__` (later defs shadow earlier ones)
    and locals are rebound to `_a` while later lines read `options`,
    `init_image`, `pipe`, `prompt`, `generator`, `output`, `images`,
    `image_slice`, `expected_slice` — names lost in scrambling; confirm
    against the upstream test file.
    '''

    @property
    def a__ (self ) -> Tuple:
        """ONNX Runtime CUDA provider tuple with a capped GPU memory arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def a__ (self ) -> Any:
        """Session options with memory-pattern optimization disabled."""
        _a = ort.SessionOptions()
        _a = False
        return options

    def a__ (self ) -> str:
        """Full img2img run with the default (PNDM) scheduler against a reference slice."""
        _a = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        _a = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=A )
        _a = '''A fantasy landscape, trending on artstation'''
        _a = np.random.RandomState(0 )
        _a = pipe(
            prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type='''np''' , )
        _a = output.images
        _a = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        _a = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def a__ (self ) -> Optional[int]:
        """Full img2img run with the LMS discrete scheduler against a reference slice."""
        _a = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        _a = init_image.resize((768, 512) )
        _a = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=A )
        _a = '''A fantasy landscape, trending on artstation'''
        _a = np.random.RandomState(0 )
        _a = pipe(
            prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type='''np''' , )
        _a = output.images
        _a = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        _a = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 352 | 0 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
# Metric metadata strings; the decorator and MetricInfo below reference these
# exact names — previously all three were bound to the same throwaway name.
_CITATION = """\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
    (float): the word error rate
Examples:
    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]
    >>> references = [\"this is the reference\", \"there is another one\"]
    >>> wer = datasets.load_metric(\"wer\")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """Word error rate metric backed by jiwer's ``compute_measures``."""

    # The datasets framework dispatches to `_info` / `_compute`; both methods
    # were previously named `_SCREAMING_SNAKE_CASE` (second def shadowed the
    # first), so the metric could never be constructed or evaluated.
    def _info(self):
        """Declare the metric's schema, citation and reference material."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
                """https://en.wikipedia.org/wiki/Word_error_rate""",
            ] , )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the WER, either over the concatenated corpus or averaged
        pairwise over (prediction, reference) with error counts accumulated."""
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 644 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
# Map from a plain-English language name to its FLORES-200 code, as used by
# the NLLB-200 checkpoints. The class below looks this up as LANGUAGE_CODES;
# binding it to that name (the original bound it to `a__`) fixes a NameError.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}

# Backward-compatible alias for the name the dict was originally bound to.
a__ = LANGUAGE_CODES
class lowercase(PipelineTool):
    """Agent tool wrapping the NLLB-200 translation model.

    Inputs are three strings (text, source language, target language, the
    latter two in plain English); the output is the translated text.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    # Plain-English language name -> FLORES-200 code understood by the tokenizer.
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """Validate the language names and tokenize `text` for translation."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        """Run generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated sequence back to a plain string."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 165 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase(unittest.TestCase):
    """Integration test for TFCamembertModel against fairseq reference values."""

    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # Reference values were produced with:
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 184 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase(unittest.TestCase):
    """Integration test for google/mt5-small against a mesh-tf reference loss."""

    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # Convert the mean cross-entropy back to a total log-likelihood so it is
        # comparable with the mesh-tf reference score below.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 184 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build an UperNetConfig (ConvNext backbone + ADE20k labels) for `model_name`."""
    auxiliary_in_channels = 384
    # Note: "xlarge" also contains "large", so the xlarge branch must come last.
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information (ADE20k semantic segmentation, 150 classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping mmseg checkpoint names to HF names."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        # stage 0 has no downsampling layer of its own
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))

        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`, in place."""
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmseg UperNet+ConvNext checkpoint to HF format and verify its logits.

    Downloads the original state dict, renames its keys, loads it into a fresh
    UperNetForSemanticSegmentation, checks a logits slice against known values,
    then optionally saves and/or pushes the converted model and processor.
    """
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 74 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; the config class below logs through this name.
logger = logging.get_logger(__name__)
# Registry of known pretrained Deformable DETR config files.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case_(PretrainedConfig):
    """Configuration for Deformable DETR models.

    Stores the backbone choice (timm name or a HF backbone config), the
    encoder/decoder transformer dimensions, the deformable-attention settings
    and the Hungarian-matcher / loss coefficients.
    """

    model_type = "deformable_detr"
    # Generic names -> model-specific attribute names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain dict into the proper backbone config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 335 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# T5X/Switch parameter-name fragments -> HF module-name fragments.
# Applied after layer renumbering by `rename_keys` below; should not include
# what is already done by the `from_pt` argument.
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """Rename a flattened T5X/Switch state dict to HF naming, in place; returns it."""
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)

        encdec_pattern = r"(encoder|decoder)\/"
        if re.match(encdec_pattern, key):
            groups = re.match(encdec_pattern, new_key).groups()
            # MLP sub-layer index differs: 1 in the encoder, 2 in the decoder.
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert, then drop the stacked tensor.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
# Gin-config parameter names -> SwitchTransformersConfig keyword arguments.
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """Convert a google style (gin) config to the hugging face format."""
    # NOTE: original script shadows the stdlib `re` with the third-party
    # `regex` package here; kept for fidelity with the upstream converter.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # Gin stores everything as text; restore int/float types.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X Switch checkpoint, rename its weights and save a PyTorch model."""
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 262 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
# SentencePiece's word-boundary marker character.
# NOTE(review): every module-level constant below was mechanically renamed to
# the same identifier `snake_case_`, so each assignment clobbers the previous
# one. Restore distinct names (logger, SPIECE_UNDERLINE, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# FAIRSEQ_LANGUAGE_CODES) before relying on them.
snake_case_ = '''▁'''
# File names the tokenizer reads/writes when saving or loading locally.
snake_case_ = {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
# Hub URLs of the pretrained vocab / SentencePiece / config files per checkpoint.
snake_case_ = {
    '''vocab_file''': {
        '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
        '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
    },
    '''spm_file''': {
        '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
        '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
    },
    '''tokenizer_config_file''': {
        '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
        '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
    },
}
# Maximum model input length (positional embeddings) per checkpoint.
snake_case_ = {
    '''facebook/m2m100_418M''': 1_0_2_4,
}
# fmt: off
# Language codes supported by each checkpoint family; each code gets a
# `__code__` special token appended to the vocabulary.
snake_case_ = {
    '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
    '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class snake_case_ ( _A):
    """
    SentencePiece-backed tokenizer (M2M100-style): a JSON vocabulary plus an
    SPM model, extended with one `__lang__` token per supported language that
    is used as a source/target prefix token.

    NOTE(review): this file has been mechanically renamed — every method below
    shares the name `__lowercase`, several signatures repeat a parameter name
    (a SyntaxError as written), and bodies reference the original local names
    (e.g. `vocab_file`, `tokens`). The comments describe the evident intent;
    restore the original identifiers before executing.
    """

    lowerCamelCase :Union[str, Any] = VOCAB_FILES_NAMES
    lowerCamelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase :str = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase :Union[str, Any] = ["input_ids", "attention_mask"]
    lowerCamelCase :List[int] = []
    lowerCamelCase :List[int] = []

    # Build encoder/decoder from the JSON vocab, load the SPM model, and append
    # the per-language `__lang__` tokens directly after the base vocabulary.
    def __init__( self , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase="<unk>" , __lowercase="m2m100" , __lowercase = None , __lowercase=8 , **__lowercase , ) -> None:
        lowerCamelCase : Union[str, Any] ={} if sp_model_kwargs is None else sp_model_kwargs
        lowerCamelCase : List[str] =language_codes
        lowerCamelCase : int =FAIRSEQ_LANGUAGE_CODES[language_codes]
        lowerCamelCase : str ={lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code}
        lowerCamelCase : List[Any] =kwargs.get('''additional_special_tokens''' , [] )
        # Register every language token as an additional special token (skipping
        # any the caller already provided).
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(__lowercase )
            for lang_code in fairseq_language_code
            if self.get_lang_token(__lowercase ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=__lowercase , tgt_lang=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , language_codes=__lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowercase , **__lowercase , )
        lowerCamelCase : Dict =vocab_file
        lowerCamelCase : List[Any] =load_json(__lowercase )
        lowerCamelCase : Optional[int] ={v: k for k, v in self.encoder.items()}
        lowerCamelCase : List[Any] =spm_file
        lowerCamelCase : str =load_spm(__lowercase , self.sp_model_kwargs )
        lowerCamelCase : Tuple =len(self.encoder )
        # Language-token ids start right after the base vocabulary.
        lowerCamelCase : Optional[int] ={
            self.get_lang_token(__lowercase ): self.encoder_size + i for i, lang_code in enumerate(__lowercase )
        }
        lowerCamelCase : Tuple ={lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowercase )}
        lowerCamelCase : Tuple ={v: k for k, v in self.lang_token_to_id.items()}
        # Default to English when no source language is given.
        lowerCamelCase : Optional[Any] =src_lang if src_lang is not None else '''en'''
        lowerCamelCase : Any =tgt_lang
        lowerCamelCase : List[Any] =self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        lowerCamelCase : Optional[Any] =num_madeup_words

    # Total vocabulary size: base vocab plus the language tokens.
    @property
    def __lowercase ( self ) -> int:
        return len(self.encoder ) + len(self.lang_token_to_id )

    @property
    def __lowercase ( self ) -> str:
        return self._src_lang

    # Setting `src_lang` also refreshes the special prefix/suffix tokens.
    @src_lang.setter
    def __lowercase ( self , __lowercase ) -> None:
        lowerCamelCase : Any =new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # Tokenize raw text with the SentencePiece model.
    def __lowercase ( self , __lowercase ) -> List[str]:
        return self.sp_model.encode(__lowercase , out_type=__lowercase )

    # token -> id, checking the language tokens before the base vocab.
    def __lowercase ( self , __lowercase ) -> Optional[Any]:
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(__lowercase , self.encoder[self.unk_token] )

    # id -> token, checking the language-token id range first.
    def __lowercase ( self , __lowercase ) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(__lowercase , self.unk_token )

    # Join sub-tokens back into text; special tokens bypass the SPM decoder.
    def __lowercase ( self , __lowercase ) -> str:
        lowerCamelCase : Dict =[]
        lowerCamelCase : Dict =''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(__lowercase ) + token
                lowerCamelCase : str =[]
            else:
                current_sub_tokens.append(__lowercase )
        out_string += self.sp_model.decode(__lowercase )
        return out_string.strip()

    # Mask with 1s at special (prefix/suffix) positions, 0s at sequence tokens.
    def __lowercase ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
        lowerCamelCase : int =[1] * len(self.prefix_tokens )
        lowerCamelCase : List[str] =[1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__lowercase )) + suffix_ones
        return prefix_ones + ([0] * len(__lowercase )) + ([0] * len(__lowercase )) + suffix_ones

    # Wrap the ids with the current language prefix and the EOS suffix.
    def __lowercase ( self , __lowercase , __lowercase = None ) -> List[int]:
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    # Full vocabulary mapping, including tokens added after training.
    def __lowercase ( self ) -> Dict:
        lowerCamelCase : Dict ={self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    # The SentencePiece processor is not picklable: drop it before pickling
    # and reload it from `spm_file` on unpickling.
    def __getstate__( self ) -> Dict:
        lowerCamelCase : Optional[Any] =self.__dict__.copy()
        lowerCamelCase : Union[str, Any] =None
        return state

    def __setstate__( self , __lowercase ) -> None:
        lowerCamelCase : int =d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            lowerCamelCase : Optional[int] ={}
        lowerCamelCase : Optional[Any] =load_spm(self.spm_file , self.sp_model_kwargs )

    # Save the JSON vocab and the SPM model file into `save_directory`.
    def __lowercase ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        lowerCamelCase : Optional[Any] =Path(__lowercase )
        if not save_dir.is_dir():
            raise OSError(F"{save_directory} should be a directory" )
        lowerCamelCase : List[str] =save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        lowerCamelCase : Tuple =save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , __lowercase )
        # Copy the original SPM file when it exists on disk; otherwise dump the
        # serialized model proto held in memory.
        if os.path.abspath(self.spm_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , __lowercase )
        elif not os.path.isfile(self.spm_file ):
            with open(__lowercase , '''wb''' ) as fi:
                lowerCamelCase : List[Any] =self.sp_model.serialized_model_proto()
                fi.write(__lowercase )
        return (str(__lowercase ), str(__lowercase ))

    # Translation batch helper: record languages, set source specials, delegate.
    def __lowercase ( self , __lowercase , __lowercase = "en" , __lowercase = None , __lowercase = "ro" , **__lowercase , ) -> BatchEncoding:
        lowerCamelCase : Union[str, Any] =src_lang
        lowerCamelCase : Optional[int] =tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase )

    # Used by `generate()`: tokenize inputs and record the forced BOS
    # (target-language) token id.
    def __lowercase ( self , __lowercase , __lowercase , __lowercase , **__lowercase ) -> Optional[int]:
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        lowerCamelCase : List[Any] =src_lang
        lowerCamelCase : str =self(__lowercase , add_special_tokens=__lowercase , **__lowercase )
        lowerCamelCase : Tuple =self.get_lang_id(__lowercase )
        lowerCamelCase : Optional[int] =tgt_lang_id
        return inputs

    def __lowercase ( self ) -> List[str]:
        self.set_src_lang_special_tokens(self.src_lang )

    def __lowercase ( self ) -> List[str]:
        self.set_tgt_lang_special_tokens(self.tgt_lang )

    # Prefix = [source language id], suffix = [eos].
    def __lowercase ( self , __lowercase ) -> None:
        lowerCamelCase : Union[str, Any] =self.get_lang_token(__lowercase )
        lowerCamelCase : List[Any] =self.lang_token_to_id[lang_token]
        lowerCamelCase : Optional[Any] =[self.cur_lang_id]
        lowerCamelCase : Union[str, Any] =[self.eos_token_id]

    # Prefix = [target language id], suffix = [eos].
    def __lowercase ( self , __lowercase ) -> None:
        lowerCamelCase : Tuple =self.get_lang_token(__lowercase )
        lowerCamelCase : Tuple =self.lang_token_to_id[lang_token]
        lowerCamelCase : List[Any] =[self.cur_lang_id]
        lowerCamelCase : Tuple =[self.eos_token_id]

    # "fr" -> "__fr__"
    def __lowercase ( self , __lowercase ) -> str:
        return self.lang_code_to_token[lang]

    # "fr" -> id of "__fr__"
    def __lowercase ( self , __lowercase ) -> int:
        lowerCamelCase : List[str] =self.get_lang_token(__lowercase )
        return self.lang_token_to_id[lang_token]
def A__ ( path , sp_model_kwargs ) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from `path`, forwarding `sp_model_kwargs` to the processor.

    BUG FIX: the original signature repeated one parameter name (a SyntaxError)
    and the loaded processor was never bound to the `spm` name it then used.
    Parameter names are restored from the call site
    (`load_spm(spm_file, self.sp_model_kwargs)`).
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def A__ ( path ) -> Union[Dict, List]:
    """Deserialize and return the JSON document stored at `path`.

    BUG FIX: `json.load` was called on the path string instead of the open
    file object, which raises AttributeError. Also reads with an explicit
    UTF-8 encoding (JSON is UTF-8 by spec) so the locale default cannot
    corrupt non-ASCII vocab entries.
    """
    with open(path , '''r''' , encoding='''utf-8''' ) as f:
        return json.load(f )
def A__ ( data , path ) -> None:
    """Serialize `data` as pretty-printed (indent=2) JSON at `path`.

    BUG FIX: the original signature repeated one parameter name (a
    SyntaxError) and passed the same object as both the data and the file
    handle to `json.dump`. Argument order (data, path) is restored from the
    call site `save_json(self.encoder, vocab_save_path)`. Writes with an
    explicit UTF-8 encoding for cross-platform determinism.
    """
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        json.dump(data , f , indent=2 )
| 262 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffold for the Jukebox model family: public names are declared
# up front and the heavy submodules are only imported on first attribute
# access via `_LazyModule`.
# NOTE(review): identifiers here were mechanically renamed — every assignment
# targets `lowerCamelCase_` (the model list clobbers nothing but is never
# attached to the import structure), yet the final `_LazyModule(...)` call
# references `_import_structure`, which is never bound in this file. Restore
# the original names before use.
lowerCamelCase_ = {
    'configuration_jukebox': [
        'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'JukeboxConfig',
        'JukeboxPriorConfig',
        'JukeboxVQVAEConfig',
    ],
    'tokenization_jukebox': ['JukeboxTokenizer'],
}
# Modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = [
        'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'JukeboxModel',
        'JukeboxPreTrainedModel',
        'JukeboxVQVAE',
        'JukeboxPrior',
    ]
# Static type checkers see the real imports; at runtime the module object is
# replaced by a _LazyModule that defers the actual imports.
if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 513 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
    """Unit tests for `TransfoXLTokenizer` (the word-level tokenizer of Transformer-XL).

    NOTE(review): the test methods below were all mechanically renamed to
    `UpperCAmelCase`, so each definition shadows the previous one and unittest
    cannot discover them; bodies also reference original local names
    (`vocab_tokens`, `tokenizer`, ...). Restore the original `test_*` names and
    locals before running.
    """

    _snake_case = TransfoXLTokenizer
    _snake_case = False
    _snake_case = False

    # setUp: write a tiny vocabulary file into the test temp directory.
    def UpperCAmelCase ( self ) -> Any:
        super().setUp()
        snake_case : List[Any] = [
            """<unk>""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """unwanted""",
            """wa""",
            """un""",
            """running""",
            """,""",
            """low""",
            """l""",
        ]
        snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    # Mixin factory: build a lower-casing tokenizer from the temp vocab.
    def UpperCAmelCase ( self , **A ) -> Any:
        snake_case : Union[str, Any] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A )

    # Provide an (input, expected output) pair for round-trip tests.
    def UpperCAmelCase ( self , A ) -> Tuple:
        snake_case : Any = """<unk> UNwanted , running"""
        snake_case : int = """<unk> unwanted, running"""
        return input_text, output_text

    # Lower-cased tokenization maps to the expected tokens and vocab ids.
    def UpperCAmelCase ( self ) -> Tuple:
        snake_case : Tuple = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A )
        snake_case : Union[str, Any] = tokenizer.tokenize("""<unk> UNwanted , running""" )
        self.assertListEqual(A , ["""<unk>""", """unwanted""", """,""", """running"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [0, 4, 8, 7] )

    # Whitespace is collapsed; lower_case=True folds case.
    def UpperCAmelCase ( self ) -> Dict:
        snake_case : Optional[int] = TransfoXLTokenizer(lower_case=A )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )

    # lower_case=False preserves the original casing.
    def UpperCAmelCase ( self ) -> str:
        snake_case : Any = TransfoXLTokenizer(lower_case=A )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    # Moses-style punctuation splitting plus @-@ / @,@ / @.@ join markers.
    def UpperCAmelCase ( self ) -> int:
        snake_case : List[str] = TransfoXLTokenizer(lower_case=A )
        snake_case : int = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
        snake_case : int = [
            """Hello""",
            """(""",
            """bracket""",
            """)""",
            """and""",
            """side""",
            """@-@""",
            """scrolled""",
            """[""",
            """and""",
            """]""",
            """Henry""",
            """'s""",
            """$""",
            """5""",
            """@,@""",
            """000""",
            """with""",
            """3""",
            """@.@""",
            """34""",
            """m""",
            """.""",
            """What""",
            """'s""",
            """up""",
            """!""",
            """?""",
        ]
        self.assertListEqual(tokenizer.tokenize(A ) , A )
        self.assertEqual(tokenizer.convert_tokens_to_string(A ) , A )

    # move_added_token relocates a newly added token to a target id without
    # duplicating it.
    def UpperCAmelCase ( self ) -> str:
        snake_case : Optional[int] = self.get_tokenizer()
        snake_case : int = len(A )
        tokenizer.add_tokens(["""new1""", """new2"""] )
        tokenizer.move_added_token("""new1""" , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(A ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 587 | 0 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__lowerCamelCase = logging.get_logger(__name__)
class _lowercase ( __UpperCAmelCase ):
    """
    Argument handler for zero-shot text classification.

    Normalizes candidate labels (accepting either a list or a comma-separated
    string) and expands every (sequence, label) combination into a
    premise/hypothesis pair for an NLI model.

    BUG FIXES: the original `__call__` repeated one parameter name three times
    (a SyntaxError), both `isinstance` checks tested an object against itself
    instead of `str`, and the locals `labels`/`sequence_pairs` used by the
    bodies were never bound. Names are restored from the bodies' own usage and
    from the pipeline call site `self._args_parser._parse_labels(...)`.
    """

    def _parse_labels( self , labels ):
        """Return `labels` as a list of non-empty, stripped label strings."""
        # A single comma-separated string is split into individual labels;
        # an already-listed input passes through unchanged.
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(''',''' ) if label.strip()]
        return labels

    def __call__( self , sequences , labels , hypothesis_template ):
        """Build the (sequence, hypothesis) pairs fed to the NLI model.

        Returns:
            (sequence_pairs, sequences): one [premise, hypothesis] pair per
            (sequence, label) combination, plus the normalized sequence list.

        Raises:
            ValueError: if no labels/sequences are given, or the template has
                no `{}` placeholder for the label.
        """
        if len(sequences ) == 0 or len(labels ) == 0:
            raise ValueError('''You must include at least one label and at least one sequence.''' )
        # The template must actually change when formatted with a label.
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    '''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
                    '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
                ).format(hypothesis_template ) )

        if isinstance(sequences , str ):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )

        return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowercase ( __UpperCAmelCase ):
    """
    NLI-based zero-shot sequence classification pipeline: each candidate label
    is turned into a hypothesis (default "This example is {label}.") and
    scored with an entailment model against the input sequence.

    NOTE(review): methods below were mechanically renamed to the same
    identifier `lowerCAmelCase__`, and several signatures repeat a parameter
    name (a SyntaxError as written); bodies reference the original local names
    (`kwargs`, `inputs`, ...). Comments describe the evident intent.
    """

    def __init__( self , UpperCamelCase_=ZeroShotClassificationArgumentHandler() , *UpperCamelCase_ , **UpperCamelCase_ ):
        __magic_name__ = args_parser
        super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
        # Warn when the model config does not identify which label id means
        # "entailment" — scores would then be computed against a guess of -1.
        if self.entailment_id == -1:
            logger.warning(
                '''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
                '''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )

    # Index of the "entailment" label in the model's label2id mapping (-1 if absent).
    @property
    def lowerCAmelCase__ ( self ):
        for label, ind in self.model.config.labelaid.items():
            if label.lower().startswith('''entail''' ):
                return ind
        return -1

    # Tokenize a (sequence, hypothesis) pair, retrying without truncation when
    # the tokenizer complains the input is too short to truncate.
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=TruncationStrategy.ONLY_FIRST , **UpperCamelCase_ ):
        __magic_name__ = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                '''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
                ''' `pad_token=eos_token`''' )
            __magic_name__ = self.tokenizer.eos_token
        try:
            __magic_name__ = self.tokenizer(
                UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , )
        except Exception as e:
            if "too short" in str(UpperCamelCase_ ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                __magic_name__ = self.tokenizer(
                    UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs

    # Split pipeline kwargs into preprocess/postprocess parameter dicts,
    # translating the deprecated `multi_class` kwarg to `multi_label`.
    def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
        if kwargs.get('''multi_class''' , UpperCamelCase_ ) is not None:
            __magic_name__ = kwargs['''multi_class''']
            logger.warning(
                '''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
                '''`multi_class` will be removed in a future version of Transformers.''' )
        __magic_name__ = {}
        if "candidate_labels" in kwargs:
            __magic_name__ = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
        if "hypothesis_template" in kwargs:
            __magic_name__ = kwargs['''hypothesis_template''']
        __magic_name__ = {}
        if "multi_label" in kwargs:
            __magic_name__ = kwargs['''multi_label''']
        return preprocess_params, {}, postprocess_params

    # Accept the legacy positional form `pipeline(sequences, candidate_labels)`.
    def __call__( self , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ , ):
        if len(UpperCamelCase_ ) == 0:
            pass
        elif len(UpperCamelCase_ ) == 1 and "candidate_labels" not in kwargs:
            __magic_name__ = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''' )
        return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )

    # preprocess: yield one tokenized (sequence, hypothesis) model input per
    # candidate label; `is_last` marks the end of the chunk for this sequence.
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_="This example is {}." ):
        __magic_name__ , __magic_name__ = self._args_parser(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        for i, (candidate_label, sequence_pair) in enumerate(zip(UpperCamelCase_ , UpperCamelCase_ ) ):
            __magic_name__ = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(UpperCamelCase_ ) - 1,
                **model_input,
            }

    # forward: run one (sequence, label) pair through the NLI model, keeping
    # the bookkeeping fields needed by postprocess.
    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        __magic_name__ = inputs['''candidate_label''']
        __magic_name__ = inputs['''sequence''']
        __magic_name__ = {k: inputs[k] for k in self.tokenizer.model_input_names}
        __magic_name__ = self.model(**UpperCamelCase_ )
        __magic_name__ = {
            '''candidate_label''': candidate_label,
            '''sequence''': sequence,
            '''is_last''': inputs['''is_last'''],
            **outputs,
        }
        return model_outputs

    # postprocess: aggregate the per-label logits into ranked labels/scores.
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
        __magic_name__ = [outputs['''candidate_label'''] for outputs in model_outputs]
        __magic_name__ = [outputs['''sequence'''] for outputs in model_outputs]
        __magic_name__ = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
        __magic_name__ = logits.shape[0]
        __magic_name__ = len(UpperCamelCase_ )
        __magic_name__ = N // n
        __magic_name__ = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(UpperCamelCase_ ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            __magic_name__ = self.entailment_id
            __magic_name__ = -1 if entailment_id == 0 else 0
            __magic_name__ = reshaped_outputs[..., [contradiction_id, entailment_id]]
            __magic_name__ = np.exp(UpperCamelCase_ ) / np.exp(UpperCamelCase_ ).sum(-1 , keepdims=UpperCamelCase_ )
            __magic_name__ = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            __magic_name__ = reshaped_outputs[..., self.entailment_id]
            __magic_name__ = np.exp(UpperCamelCase_ ) / np.exp(UpperCamelCase_ ).sum(-1 , keepdims=UpperCamelCase_ )
        __magic_name__ = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 190 |
"""simple docstring"""
from pathlib import Path
import fire
def lowercase ( src_dir , dest_dir , n ) -> None:
    """Write the first `n` lines of every file in `src_dir` into `dest_dir`.

    Args:
        src_dir: directory whose files are minified (non-recursive).
        dest_dir: output directory, created if missing.
        n: number of leading lines to keep per file.

    BUG FIXES: the original signature repeated one parameter name three times
    (a SyntaxError) and its `-> Optional[int]` annotation referenced a name
    this file never imports; file handles were also opened without being
    closed. Output format is unchanged: rstripped lines joined with '\\n'.
    """
    src_path = Path(src_dir )
    dest_path = Path(dest_dir )
    dest_path.mkdir(exist_ok=True )
    for path in src_path.iterdir():
        # Skip subdirectories: only regular files can be minified.
        if not path.is_file():
            continue
        # Context managers close both handles promptly.
        with path.open() as src_file:
            kept_lines = [line.rstrip() for line in src_file][:n]
        dest_file = dest_path.joinpath(path.name )
        print(dest_file )
        with dest_file.open('''w''' ) as out_file:
            out_file.write('''\n'''.join(kept_lines ) )
if __name__ == "__main__":
    # BUG FIX: `fire.Fire(minify)` referenced a name that does not exist in
    # this file — the minify function above is named `lowercase`.
    # `fire` exposes it as a CLI: python minify.py SRC_DIR DEST_DIR N
    fire.Fire(lowercase)
| 190 | 1 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__snake_case = logging.getLogger(__name__)
def _lowerCamelCase ( lowerCamelCase__ : torch.nn.Module , lowerCamelCase__ : BnbQuantizationConfig , lowerCamelCase__ : Union[str, os.PathLike] = None , lowerCamelCase__ : Optional[Dict[str, Union[int, str, torch.device]]] = None , lowerCamelCase__ : Optional[List[str]] = None , lowerCamelCase__ : Optional[Dict[Union[int, str], Union[int, str]]] = None , lowerCamelCase__ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase__ : bool = False , ):
    """
    Quantize `model` with bitsandbytes and dispatch it onto the requested
    devices, loading weights from `weights_location` when the model was built
    under `init_empty_weights`.

    NOTE(review): mechanical renaming collapsed distinct identifiers — the
    signature repeats `lowerCamelCase__` (a SyntaxError as written), both
    availability checks call the same `is_abit_bnb_available`, and the 8-bit
    and 4-bit flags are both read from `bnb_quantization_config.load_in_abit`.
    The comments below describe the evident intent; restore the original
    8-bit/4-bit names before executing.
    """
    lowercase__ : Optional[int] = bnb_quantization_config.load_in_abit
    lowercase__ : int = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """make sure you have the latest version of `bitsandbytes` installed.""" )
    lowercase__ : str = []
    # custom device map
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(device_map.keys() ) > 1:
        lowercase__ : int = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        lowercase__ : Any = get_keys_to_not_convert(lowerCamelCase__ )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(lowerCamelCase__ )
    lowercase__ : Tuple = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        lowercase__ : Optional[Any] = []
    lowercase__ : str = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(lowerCamelCase__ )
    # compatibility with peft
    lowercase__ : List[str] = load_in_abit
    lowercase__ : List[Any] = load_in_abit
    lowercase__ : Union[str, Any] = get_parameter_device(lowerCamelCase__ )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            """It is not recommended to quantize a loaded model. """
            """The model should be instantiated under the `init_empty_weights` context manager.""" )
        lowercase__ : Dict = replace_with_bnb_layers(lowerCamelCase__ , lowerCamelCase__ , modules_to_not_convert=lowerCamelCase__ )
        # convert param to the right dtype
        lowercase__ : int = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    lowercase__ : Optional[int] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
                    lowercase__ : Union[str, Any] = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(lowerCamelCase__ ):
                param.to(lowerCamelCase__ )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            """We move the model to cuda.""" )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
    else:
        # Meta-device path: build the bnb-layered skeleton, compute a device
        # map, then stream the checkpoint weights in.
        with init_empty_weights():
            lowercase__ : Optional[Any] = replace_with_bnb_layers(
                lowerCamelCase__ , lowerCamelCase__ , modules_to_not_convert=lowerCamelCase__ )
        lowercase__ : str = get_quantized_model_device_map(
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , max_memory=lowerCamelCase__ , no_split_module_classes=lowerCamelCase__ , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            lowercase__ : List[str] = True
        lowercase__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
        load_checkpoint_in_model(
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCamelCase__ , offload_state_dict=lowerCamelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
    return dispatch_model(lowerCamelCase__ , device_map=lowerCamelCase__ , offload_dir=lowerCamelCase__ )
def _lowerCamelCase ( lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : int=None , lowerCamelCase__ : Any=None ):
    """
    Compute (or validate) the device map used to dispatch the quantized model.

    NOTE(review): as elsewhere in this file, the renamer collapsed parameter
    names (`lowerCamelCase__` repeated — a SyntaxError as written); the
    evident parameters are (model, bnb_quantization_config, device_map,
    max_memory, no_split_module_classes).
    """
    if device_map is None:
        if torch.cuda.is_available():
            lowercase__ : Optional[int] = {"""""": torch.cuda.current_device()}
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
                """'sequential'.""" )
        # Modules that stay un-quantized keep their requested dtype when the
        # map is inferred automatically.
        lowercase__ : Tuple = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        lowercase__ : Optional[int] = {}
        lowercase__ : str = special_dtypes
        lowercase__ : Union[str, Any] = no_split_module_classes
        lowercase__ : Tuple = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            lowercase__ : List[str] = get_balanced_memory(
                lowerCamelCase__ , low_zero=(device_map == """balanced_low_0""") , max_memory=lowerCamelCase__ , **lowerCamelCase__ , )
        lowercase__ : Dict = max_memory
        lowercase__ : Any = infer_auto_device_map(lowerCamelCase__ , **lowerCamelCase__ )
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        # check if don't have any quantized module on the cpu
        lowercase__ : str = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        lowercase__ : Optional[int] = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """ )
                else:
                    logger.info(
                        """Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
        del device_map_without_some_modules
    return device_map
def _lowerCamelCase ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    """
    Replace every eligible `nn.Linear` inside `model` with a bitsandbytes
    quantized layer, warning when nothing was replaced.

    BUG FIXES: the original signature repeated one parameter name four times
    (a SyntaxError) and the recursion result was never bound to the
    `has_been_replaced` name it then tested. Parameter names are restored
    from the keyword call sites (`replace_with_bnb_layers(model, config,
    modules_to_not_convert=...)`).

    Returns:
        The same `model` instance, mutated in place.
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    # The recursive worker does the actual swapping and reports whether any
    # linear layer was converted.
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
    return model
def _lowerCamelCase ( lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : int=None , ):
    """
    Recursive worker: swap eligible `nn.Linear` children of `model` for
    bitsandbytes 8-bit/4-bit linear layers and return
    (model, has_been_replaced).

    NOTE(review): parameter names were collapsed by the renamer (a SyntaxError
    as written); the evident parameters are (model, bnb_quantization_config,
    modules_to_not_convert, current_key_name). `Linearabit`/`LinearabitLt`
    are presumably the obfuscated `Linear4bit`/`Linear8bitLt` — confirm.
    """
    lowercase__ : Tuple = False
    for name, module in model.named_children():
        if current_key_name is None:
            lowercase__ : Any = []
        current_key_name.append(lowerCamelCase__ )
        if isinstance(lowerCamelCase__ , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            lowercase__ : List[str] = """.""".join(lowerCamelCase__ )
            lowercase__ : Tuple = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    lowercase__ : Any = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    lowercase__ : int = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCamelCase__ , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    lowercase__ : Dict = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
                # Carry the original weights/bias over, freeze, and splice in.
                lowercase__ : Optional[int] = module.weight.data
                if module.bias is not None:
                    lowercase__ : Dict = module.bias.data
                bnb_module.requires_grad_(lowerCamelCase__ )
                setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                lowercase__ : Tuple = True
        if len(list(module.children() ) ) > 0:
            # Recurse into container modules and fold in their result.
            lowercase__ , lowercase__ : Optional[Any] = _replace_with_bnb_layers(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            lowercase__ : List[str] = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def _lowerCamelCase(model):
    """Return module names that must NOT be converted to bnb quantized layers.

    Keeps the output head and any tied-weight modules in full precision so
    quantization does not break weight tying or the LM head.

    Args:
        model: the full ``nn.Module`` (typically a transformers model).

    Returns:
        list[str]: module names (``.weight``/``.bias`` suffixes stripped).
    """
    # Copy on the meta device: zero memory cost inside `init_empty_weights`.
    with init_empty_weights():
        tied_model = deepcopy(model)
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18, which returned a dict.
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # Otherwise the model has an attached head: keep the last module too.
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # Add the last module together with the tied weights, de-duplicated.
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # Remove ".weight"/".bias" suffixes so names refer to modules, not params.
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def _lowerCamelCase(model):
    """Return True if *model* contains at least one bitsandbytes 4-bit linear layer."""
    for module in model.modules():
        # NOTE: the check must run on each submodule, not on `model` itself.
        if isinstance(module, bnb.nn.Linear4bit):
            return True
    return False
def _lowerCamelCase(parameter: nn.Module):
    """Return the torch device of the first parameter of the given module.

    Raises:
        StopIteration: if the module has no parameters at all.
    """
    return next(parameter.parameters()).device


# Descriptive, importable alias (the mangled name above is kept for compatibility).
get_parameter_device = _lowerCamelCase
def _lowerCamelCase(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """Quantize (if needed) and offload one parameter, leaving a meta placeholder.

    If *fp16_statistics* is None the parameter is first materialized on GPU 0 so
    bitsandbytes quantizes it, then the quantized weight — and its ``SCB`` scale
    statistics, when present — is offloaded. Otherwise the raw parameter and the
    provided fp16 statistics are offloaded directly. In both cases the parameter
    is finally replaced by an empty tensor on the ``meta`` device.

    NOTE(review): branch nesting reconstructed from the upstream accelerate
    implementation; the dump's indentation was ambiguous — confirm against it.
    """
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        # Walk the dotted path down to the module owning the parameter.
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    # Free the live copy: keep only an empty meta tensor with the right shape.
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def _lowerCamelCase(precision: int) -> str:
    """Return pi to ``precision - 1`` significant digits (Chudnovsky algorithm).

    Args:
        precision: number of significant digits to compute with; the returned
            string has one digit fewer, since the last computed digit may be
            mis-rounded.

    Raises:
        TypeError: if *precision* is not an integer.
        ValueError: if *precision* is smaller than 1.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    # The Decimal context precision drives the accuracy of every operation below.
    getcontext().prec = precision
    # Each Chudnovsky term contributes roughly 14 correct digits.
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last digit, which may be inaccurately rounded.
    return str(constant_term / partial_sum)[:-1]


# Readable alias for external use (the original `__main__` demo calls `pi`).
pi = _lowerCamelCase
if __name__ == "__main__":
    # Demo: print pi to (n - 1) significant digits.
    n = 50
    print(f"The first {n} digits of pi is: {_lowerCamelCase(n)}")
from collections import defaultdict
class lowerCamelCase:
    """Count the ways to assign N tasks to M persons with a bitmask DP.

    Each person may perform only the tasks listed for them, every person must
    get exactly one task, and no task is assigned twice.
    """

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table of dimension (2^M) x (N+1); -1 marks "not computed yet".
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask has all M person-bits set: every person holds a task.
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't use this task in the arrangement.
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # Now assign this task to every free person able to do it and recurse
        # with the updated mask.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # Memoize before returning.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task.
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # Fill the DP table; the final answer is stored in dp[0][1].
        return self.count_ways_until(0, 1)


# Descriptive alias matching the name used by the `__main__` demo.
AssignmentUsingBitmask = lowerCamelCase
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__lowerCamelCase : Tuple = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 501 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger, following the transformers convention.
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint identifiers to their hosted config files.
# (Previously both assignments reused one mangled name, clobbering the logger.)
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCamelCase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a NAT (Neighborhood Attention Transformer) model.

    Stores the hyper-parameters that define the architecture; instantiating a
    model from this configuration yields a model with those settings.
    """

    model_type = "nat"

    # Canonical PretrainedConfig attribute names mapped to NAT's own names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 501 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.