| code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
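# A quick sanity check of the expected output; the trailing space after every
# entry follows from the `out += " "` at the end of each loop iteration:
#     fizz_buzz(1, 7)  ->  "1 2 Fizz 4 Buzz Fizz 7 "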
if __name__ == "__main__":
import doctest
doctest.testmod()
| 224 |
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit
    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
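# An equivalent table-driven variant built on str.maketrans; this is a sketch
# for illustration only (the name `atbash_fast` is not part of the original module):
ATBASH_TABLE = str.maketrans(
    string.ascii_letters, string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
)
def atbash_fast(sequence: str) -> str:
    # translate() does a single C-level pass over the string
    return sequence.translate(ATBASH_TABLE)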
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"{example} encrypted in atbash: {atbash(example)}")
benchmark()
| 224 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """This is a general feature extraction class for speech recognition."""
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)
        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )
        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")
        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)
        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)
        if needs_to_be_padded:
            difference = max_length - len(required_input)
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )
        return padding_strategy
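# A minimal usage sketch (hypothetical subclass; `input_values` mirrors the main
# input name used by most audio feature extractors, it is not set by this base class):
#     class ToyFeatureExtractor(SequenceFeatureExtractor):
#         model_input_names = ["input_values"]
#     toy = ToyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#     batch = toy.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding="longest", return_tensors="np")
#     batch["input_values"].shape  ->  (2, 3)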
| 708 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batch, differentiable, standard pinhole camera."""
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
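# A minimal usage sketch: 20 cameras panning around the origin at 64x64 resolution;
# the ray tensor shape follows from the view() in `camera_rays` above.
#     cameras = create_pan_cameras(64)
#     rays = cameras.camera_rays  # torch.Size([1, 20 * 64 * 64, 2, 3])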
| 230 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """Tests a single complete example, script and all."""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))
    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))
    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)
    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)
    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)
    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)
    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
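# These tests are assumed to run from the root of the accelerate repository, e.g.
# (file path illustrative):
#     python -m pytest tests/test_examples.py -k checkpointing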
| 607 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
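# For example, _is_chinese_char(ord("中")) is True (U+4E2D falls in the first
# range above), while _is_chinese_char(ord("a")) falls outside every range and
# returns False.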
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            length = min(end - start, max_word_len)
            for i in range(length, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords that start with ##, which means it is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
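# Typical invocation, using the argparse defaults declared below (the script file
# name here is illustrative):
#     python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt --save_path ./resources/ref.txt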
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
args = parser.parse_args()
main(args)
| 607 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )
        desired_query_values = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        desired_key_values = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_values, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_values, atol=self.tolerance)
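# For intuition, a small self-contained sketch of the pairwise rotation checked
# above; this illustrates rotary embeddings and is not the transformers kernel:
def rotary_sketch(x, sin, cos):
    # each (x1, x2) pair rotates by the position angle:
    #   x1' = x1*cos - x2*sin,  x2' = x2*cos + x1*sin
    x1, x2 = x[..., 0::2], x[..., 1::2]
    rotated = tf.stack([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
    return tf.reshape(rotated, tf.shape(x))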
| 299 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass
    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
| 299 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = BlipImageProcessor()
lowerCamelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
lowerCamelCase_ = BlipaProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
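# For reference: the mangled BlipaProcessor / GPTaTokenizer above correspond to
# transformers' Blip2Processor / GPT2Tokenizer. A minimal sketch of the
# round-trip these tests exercise, reusing the tiny checkpoint they load:
def _reference_blip2_processor_sketch():
    import numpy as np
    from PIL import Image
    from transformers import Blip2Processor, BlipImageProcessor, GPT2Tokenizer

    image_processor = BlipImageProcessor()
    tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
    processor = Blip2Processor(image_processor, tokenizer)

    image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
    inputs = processor(text="lower newer", images=image)
    # pixel_values from the image processor; input_ids/attention_mask from the tokenizer
    assert sorted(inputs.keys()) == ["attention_mask", "input_ids", "pixel_values"]
    return inputs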
| 29 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = StableDiffusionInstructPixaPixPipeline
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self : str ) -> Optional[int]:
torch.manual_seed(0 )
__magic_name__: str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
__magic_name__: Union[str, Any] = PNDMScheduler(skip_prk_steps=__snake_case )
torch.manual_seed(0 )
__magic_name__: Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__: int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__magic_name__: Optional[int] = CLIPTextModel(__snake_case )
__magic_name__: Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__magic_name__: Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : int , __snake_case : List[Any]=0 ) -> Optional[Any]:
__magic_name__: Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__: Any = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" )
if str(__snake_case ).startswith("""mps""" ):
__magic_name__: Optional[Any] = torch.manual_seed(__snake_case )
else:
__magic_name__: str = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__: Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: int = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: List[Any] = self.get_dummy_components()
__magic_name__: int = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: Union[str, Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: List[Any] = self.get_dummy_inputs(__snake_case )
__magic_name__: Tuple = sd_pipe(**__snake_case ).images
__magic_name__: Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: List[Any] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
__magic_name__: Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: str = self.get_dummy_components()
__magic_name__: Dict = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: str = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: Optional[Any] = self.get_dummy_inputs(__snake_case )
__magic_name__: List[Any] = """french fries"""
__magic_name__: int = sd_pipe(**__snake_case , negative_prompt=__snake_case )
__magic_name__: Dict = output.images
__magic_name__: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: str = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
__magic_name__: Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: int = self.get_dummy_components()
__magic_name__: Dict = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: Dict = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: str = self.get_dummy_inputs(__snake_case )
__magic_name__: List[str] = [inputs["""prompt"""]] * 2
__magic_name__: List[str] = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
__magic_name__: Optional[int] = torch.from_numpy(__snake_case ).unsqueeze(0 ).to(__snake_case )
__magic_name__: Tuple = image / 2 + 0.5
__magic_name__: Dict = image.permute(0 , 3 , 1 , 2 )
__magic_name__: List[str] = image.repeat(2 , 1 , 1 , 1 )
__magic_name__: str = sd_pipe(**__snake_case ).images
__magic_name__: List[str] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__magic_name__: Optional[int] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
__magic_name__: Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: Union[str, Any] = self.get_dummy_components()
__magic_name__: Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__magic_name__: Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: Optional[int] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: List[str] = self.get_dummy_inputs(__snake_case )
__magic_name__: Tuple = sd_pipe(**__snake_case ).images
__magic_name__: Any = image[0, -3:, -3:, -1]
__magic_name__: str = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: Optional[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
__magic_name__: Tuple = self.get_dummy_components()
__magic_name__: Tuple = StableDiffusionInstructPixaPixPipeline(**__snake_case )
__magic_name__: str = VaeImageProcessor(do_resize=__snake_case , do_normalize=__snake_case )
__magic_name__: Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(__snake_case , input_image_type="""pt""" ) )[0]
__magic_name__: Union[str, Any] = components["""vae"""]
__magic_name__: str = self.get_dummy_inputs_by_type(__snake_case , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__magic_name__: int = vae.encode(inputs[image_param] ).latent_dist.mode()
__magic_name__: Dict = pipe(**__snake_case )[0]
__magic_name__: Optional[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(__snake_case , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : str , __snake_case : List[str]=0 ) -> Dict:
__magic_name__: Union[str, Any] = torch.manual_seed(__snake_case )
__magic_name__: Optional[int] = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
__magic_name__: Optional[Any] = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: Optional[Any] = self.get_inputs()
__magic_name__: str = pipe(**__snake_case ).images
__magic_name__: Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Any = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
__magic_name__: Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case )
__magic_name__: List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: List[str] = self.get_inputs()
__magic_name__: Dict = pipe(**__snake_case ).images
__magic_name__: str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Optional[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Any ) -> List[str]:
__magic_name__: str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case )
__magic_name__: int = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: Union[str, Any] = self.get_inputs()
__magic_name__: Any = pipe(**__snake_case ).images
__magic_name__: int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Optional[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : int ) -> Dict:
__magic_name__: Tuple = 0
def callback_fn(__snake_case : int , __snake_case : int , __snake_case : torch.FloatTensor ) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__magic_name__: List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__magic_name__: int = latents[0, -3:, -3:, -1]
__magic_name__: Union[str, Any] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__magic_name__: Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__magic_name__: str = latents[0, -3:, -3:, -1]
__magic_name__: Optional[Any] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
callback_fn.has_been_called = False
__magic_name__: List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case , torch_dtype=torch.floataa )
__magic_name__: Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: Dict = self.get_inputs()
pipe(**__snake_case , callback=__snake_case , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase__ ( self : Tuple ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__: List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=__snake_case , torch_dtype=torch.floataa )
__magic_name__: int = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__: Optional[int] = self.get_inputs()
__magic_name__: Any = pipe(**__snake_case )
__magic_name__: List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowerCamelCase__ ( self : str ) -> Optional[int]:
__magic_name__: Optional[int] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__magic_name__: Any = inputs["""image"""].resize((5_0_4, 5_0_4) )
__magic_name__: List[str] = """timbrooks/instruct-pix2pix"""
__magic_name__: Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__snake_case , safety_checker=__snake_case , )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
__magic_name__: str = pipe(**__snake_case )
__magic_name__: Optional[int] = output.images[0]
__magic_name__: Union[str, Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__magic_name__: Optional[Any] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
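# For reference: a minimal inference sketch for the pipeline exercised above
# (StableDiffusionInstructPix2PixPipeline under its mangled name), using the
# "timbrooks/instruct-pix2pix" checkpoint the slow tests load. A CUDA device
# is assumed; `image` is a PIL.Image to edit.
def _reference_instruct_pix2pix_sketch(image, prompt="turn him into a cyborg"):
    import torch
    from diffusers import StableDiffusionInstructPix2PixPipeline

    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
    ).to("cuda")
    out = pipe(
        prompt=prompt,
        image=image,
        num_inference_steps=3,
        guidance_scale=7.5,
        image_guidance_scale=1.0,  # how strongly to stay faithful to the input image
    )
    return out.images[0]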
| 96 | 0 |
"""simple docstring"""
import numpy as np
_lowerCamelCase = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self ):
SCREAMING_SNAKE_CASE__ = np.array(A_ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ = np.where(letter == self.SQUARE )
SCREAMING_SNAKE_CASE__ = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ = message.lower()
SCREAMING_SNAKE_CASE__ = message.replace(" " , "" )
SCREAMING_SNAKE_CASE__ = message.replace("j" , "i" )
SCREAMING_SNAKE_CASE__ = np.empty((2, len(A_ )) )
for letter_index in range(len(A_ ) ):
SCREAMING_SNAKE_CASE__ = self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE__ = numbers[0]
SCREAMING_SNAKE_CASE__ = numbers[1]
SCREAMING_SNAKE_CASE__ = first_step.reshape(2 * len(A_ ) )
SCREAMING_SNAKE_CASE__ = ""
for numbers_index in range(len(A_ ) ):
SCREAMING_SNAKE_CASE__ = int(second_step[numbers_index * 2] )
SCREAMING_SNAKE_CASE__ = int(second_step[(numbers_index * 2) + 1] )
SCREAMING_SNAKE_CASE__ = self.numbers_to_letter(A_ , A_ )
SCREAMING_SNAKE_CASE__ = encoded_message + letter
return encoded_message
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ = message.lower()
message.replace(" " , "" )
SCREAMING_SNAKE_CASE__ = np.empty(2 * len(A_ ) )
for letter_index in range(len(A_ ) ):
SCREAMING_SNAKE_CASE__ = self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE__ = numbers[0]
SCREAMING_SNAKE_CASE__ = numbers[1]
SCREAMING_SNAKE_CASE__ = first_step.reshape((2, len(A_ )) )
SCREAMING_SNAKE_CASE__ = ""
for numbers_index in range(len(A_ ) ):
SCREAMING_SNAKE_CASE__ = int(second_step[0, numbers_index] )
SCREAMING_SNAKE_CASE__ = int(second_step[1, numbers_index] )
SCREAMING_SNAKE_CASE__ = self.numbers_to_letter(A_ , A_ )
SCREAMING_SNAKE_CASE__ = decoded_message + letter
return decoded_message
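# Usage sketch for the cipher above: "j" is folded into "i" on encode, so any
# message without "j" (or spaces) survives an encode/decode round trip intact.
if __name__ == "__main__":
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    print(encoded)
    assert cipher.decode(encoded) == "testmessage"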
| 716 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=3 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=99 , UpperCAmelCase__=32 , UpperCAmelCase__=5 , UpperCAmelCase__=4 , UpperCAmelCase__=37 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=16 , UpperCAmelCase__=2 , UpperCAmelCase__=0.02 , UpperCAmelCase__=3 , UpperCAmelCase__=4 , UpperCAmelCase__=None , ):
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ = FalconModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FalconModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0]
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0]
# select random slice
SCREAMING_SNAKE_CASE__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCAmelCase : str = (FalconForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Optional[int] = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : str = False
_lowerCAmelCase : Dict = False
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = FalconModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
SCREAMING_SNAKE_CASE__ = alibi
self.model_tester.create_and_check_model(UpperCAmelCase__ , *UpperCAmelCase__ )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = input_dict["input_ids"]
SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = "single_label_classification"
SCREAMING_SNAKE_CASE__ = input_dict["input_ids"]
SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = input_dict["input_ids"]
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ = model._convert_to_rw_cache(result.past_key_values )
SCREAMING_SNAKE_CASE__ = model._convert_cache_to_standard_format(UpperCAmelCase__ , UpperCAmelCase__ )
for layer in range(len(UpperCAmelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = "multi_label_classification"
SCREAMING_SNAKE_CASE__ = input_dict["input_ids"]
SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCAmelCase__ , "use_cache" ):
return
SCREAMING_SNAKE_CASE__ = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
if "use_cache" not in inputs:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
SCREAMING_SNAKE_CASE__ = (
getattr(UpperCAmelCase__ , "decoder_layers" , UpperCAmelCase__ )
or getattr(UpperCAmelCase__ , "num_decoder_layers" , UpperCAmelCase__ )
or config.num_hidden_layers
)
SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase__ , "num_kv_heads" , config.num_attention_heads )
SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase__ , "d_model" , config.hidden_size )
SCREAMING_SNAKE_CASE__ = embed_dim // num_attention_heads
SCREAMING_SNAKE_CASE__ = outputs["past_key_values"]
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = inputs["input_ids"].shape
for i in range(UpperCAmelCase__ ):
if config.new_decoder_architecture:
SCREAMING_SNAKE_CASE__ = config.num_attention_heads
elif config.multi_query:
SCREAMING_SNAKE_CASE__ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=19 )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(UpperCAmelCase__ )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def lowerCAmelCase__ ( self ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(device=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase__ )
# Test results are the same with and without cache
SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
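# For reference: a minimal generation sketch matching the slow tests above,
# reusing the tiny random checkpoint they load (outputs are garbage by
# construction; this only checks the generate() plumbing).
def _reference_falcon_generation_sketch():
    from transformers import AutoTokenizer, FalconForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
    model = FalconForCausalLM.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
    model.eval()
    inputs = tokenizer("My favorite food is", return_tensors="pt")
    # Greedy decoding; use_cache toggles the KV-cache path the last test compares.
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=4, use_cache=True)
    return tokenizer.batch_decode(output_ids)[0]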
| 112 | 0 |
'''Train a single sigmoid neuron by forward propagation toward an expected value.'''
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value); with deriv=True, return the derivative of the
    sigmoid expressed in terms of an already-activated value."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value (scaled to 0-100) reached after training."""
    # Random starting weight, an odd integer in [1, 199]
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
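# Note on the deriv=True convention above: sigmoid_function(a, True) expects the
# already-activated value a = sigmoid(x), since d/dx sigmoid(x) = a * (1 - a).
# A small finite-difference sanity check, purely illustrative:
def _check_sigmoid_derivative(x: float = 0.3, eps: float = 1e-6) -> float:
    numeric = (sigmoid_function(x + eps) - sigmoid_function(x - eps)) / (2 * eps)
    analytic = sigmoid_function(sigmoid_function(x), deriv=True)
    return abs(numeric - analytic)  # ~0 for well-behaved x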
| 292 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=30 , _UpperCamelCase=400 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=True , _UpperCamelCase=1 / 255 , _UpperCamelCase=True , )-> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_A = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_normalize
_A = image_mean
_A = image_std
_A = do_rescale
_A = rescale_factor
_A = do_pad
def UpperCamelCase ( self )-> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self , _UpperCamelCase , _UpperCamelCase=False )-> List[str]:
if not batched:
_A = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
_A , _A = image.size
else:
_A , _A = image.shape[1], image.shape[2]
if w < h:
_A = int(self.size['shortest_edge'] * h / w )
_A = self.size['shortest_edge']
elif w > h:
_A = self.size['shortest_edge']
_A = int(self.size['shortest_edge'] * w / h )
else:
_A = self.size['shortest_edge']
_A = self.size['shortest_edge']
else:
_A = []
for image in image_inputs:
_A , _A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_A = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
_A = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_ ( UpperCAmelCase , unittest.TestCase ):
__UpperCAmelCase =DeformableDetrImageProcessor if is_vision_available() else None
def UpperCamelCase ( self )-> Optional[int]:
_A = DeformableDetrImageProcessingTester(self )
@property
def UpperCamelCase ( self )-> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self )-> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_rescale' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_pad' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'size' ) )
def UpperCamelCase ( self )-> Any:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCamelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
def UpperCamelCase ( self )-> Optional[Any]:
pass
def UpperCamelCase ( self )-> Optional[Any]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
_A = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self )-> List[str]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self )-> Dict:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase ( self )-> List[Any]:
# prepare image and target
_A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_A = json.loads(f.read() )
_A = {'image_id': 3_9769, 'annotations': target}
# encode them
_A = DeformableDetrImageProcessor()
_A = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors='pt' )
# verify pixel values
_A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _UpperCamelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
_A = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCamelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCamelCase )
_A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
_A = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCamelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCamelCase ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCamelCase ) )
# verify orig_size
_A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCamelCase ) )
# verify size
_A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCamelCase ) )
@slow
def UpperCamelCase ( self )-> Any:
# prepare image, target and masks_path
_A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_A = json.loads(f.read() )
_A = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_A = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_A = DeformableDetrImageProcessor(format='coco_panoptic' )
_A = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors='pt' )
# verify pixel values
_A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _UpperCamelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
_A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCamelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCamelCase )
_A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
_A = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCamelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCamelCase ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCamelCase ) )
# verify masks
_A = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCamelCase )
# verify orig_size
_A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCamelCase ) )
# verify size
_A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCamelCase ) )
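# For reference: a minimal preprocessing sketch for the image processor tested
# above, using the COCO fixture from the slow tests. With the default
# shortest_edge=800 / longest_edge=1333, the 480x640 fixture resizes to 800x1066.
def _reference_deformable_detr_preprocess_sketch():
    from PIL import Image
    from transformers import DeformableDetrImageProcessor

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    image_processing = DeformableDetrImageProcessor()
    encoding = image_processing(images=image, return_tensors="pt")
    return encoding["pixel_values"].shape  # torch.Size([1, 3, 800, 1066])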
| 292 | 1 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def snake_case ( a_ : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"""`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
f"{test_file} instead." )
UpperCamelCase_ : str = components[-1]
if not test_fn.endswith("""py""" ):
raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead." )
if not test_fn.startswith("""test_modeling_""" ):
raise ValueError(
f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
UpperCamelCase_ : Dict = components[:-1] + [test_fn.replace(""".py""" , """""" )]
UpperCamelCase_ : List[str] = ".".join(lowercase_ )
return test_module_path
def snake_case ( a_ : int ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = get_module_path(lowercase_ )
UpperCamelCase_ : str = importlib.import_module(lowercase_ )
return test_module
def snake_case ( a_ : int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = []
UpperCamelCase_ : List[Any] = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
if attr.endswith("""ModelTester""" ):
tester_classes.append(getattr(lowercase_ , lowercase_ ) )
# sort with class names
return sorted(lowercase_ , key=lambda x : x.__name__ )
def snake_case ( a_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = []
UpperCamelCase_ : Any = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
UpperCamelCase_ : int = getattr(lowercase_ , lowercase_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
UpperCamelCase_ : Optional[Any] = getattr(lowercase_ , """all_model_classes""" , [] )
if len(lowercase_ ) > 0:
test_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ , key=lambda x : x.__name__ )
def snake_case ( a_ : Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Dict = get_test_classes(lowercase_ )
UpperCamelCase_ : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(lowercase_ , key=lambda x : x.__name__ )
def snake_case ( a_ : str ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = test_class()
if hasattr(lowercase_ , """setUp""" ):
test.setUp()
UpperCamelCase_ : Tuple = None
if hasattr(lowercase_ , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
UpperCamelCase_ : Tuple = test.model_tester.__class__
return model_tester
def snake_case ( a_ : Any , a_ : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : str = get_test_classes(lowercase_ )
UpperCamelCase_ : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ , key=lambda x : x.__name__ )
def snake_case ( a_ : List[Any] , a_ : Any ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Any = get_test_classes_for_model(lowercase_ , lowercase_ )
UpperCamelCase_ : List[Any] = []
for test_class in test_classes:
UpperCamelCase_ : List[Any] = get_model_tester_from_test_class(lowercase_ )
if tester_class is not None:
tester_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ , key=lambda x : x.__name__ )
def snake_case ( a_ : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Any = get_test_classes(lowercase_ )
UpperCamelCase_ : Tuple = {test_class: get_model_tester_from_test_class(lowercase_ ) for test_class in test_classes}
return test_tester_mapping
def snake_case ( a_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = get_model_classes(lowercase_ )
UpperCamelCase_ : Optional[int] = {
model_class: get_test_classes_for_model(lowercase_ , lowercase_ ) for model_class in model_classes
}
return model_test_mapping
def snake_case ( a_ : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = get_model_classes(lowercase_ )
UpperCamelCase_ : Tuple = {
model_class: get_tester_classes_for_model(lowercase_ , lowercase_ ) for model_class in model_classes
}
return model_to_tester_mapping
def snake_case ( a_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase_ , lowercase_ ):
return o
elif isinstance(lowercase_ , lowercase_ ):
return o.__name__
elif isinstance(lowercase_ , (list, tuple) ):
return [to_json(lowercase_ ) for x in o]
elif isinstance(lowercase_ , lowercase_ ):
return {to_json(lowercase_ ): to_json(lowercase_ ) for k, v in o.items()}
else:
return o
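# Usage sketch for the helpers above. The obfuscated `snake_case` defs are
# referenced internally by their real names (get_module_path, get_test_module,
# get_model_to_tester_mapping, to_json, ...), so a caller would do roughly:
def _reference_mapping_sketch(test_file="tests/models/bert/test_modeling_bert.py"):
    import json

    # model class -> list of model-tester classes, serialized via to_json
    mapping = get_model_to_tester_mapping(test_file)
    return json.dumps(to_json(mapping), indent=2)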
| 707 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
UpperCamelCase_ : str = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
UpperCamelCase_ : int = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
UpperCamelCase_ : Tuple = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
UpperCamelCase_ : Optional[Any] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
UpperCamelCase_ : int = tempfile.mkdtemp()
UpperCamelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_ : List[Any] = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
# load decoder from hub
UpperCamelCase_ : Union[str, Any] = """hf-internal-testing/ngram-beam-search-decoder"""
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
UpperCamelCase_ : str = self.add_kwargs_tokens_map.copy()
kwargs.update(__lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__lowerCAmelCase )
def _UpperCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def test_save_load_pretrained_default(self ):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
processor.save_pretrained(self.tmpdirname )
processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , WavaVecaCTCTokenizer )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , WavaVecaFeatureExtractor )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , BeamSearchDecoderCTC )
def test_save_load_pretrained_additional_features(self ):
processor = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
processor = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def test_load_decoder_tokenizer_mismatch_content(self ):
tokenizer = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(ValueError , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=tokenizer , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def test_feature_extractor(self ):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
raw_speech = floats_list((3, 10_00) )
input_feat_extract = feature_extractor(raw_speech , return_tensors="""np""" )
input_processor = processor(raw_speech , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def test_tokenizer(self ):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
input_str = """This is a test string"""
encoded_processor = processor(text=input_str )
encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _get_dummy_logits(self , shape=(2, 10, 16) , seed=77 ):
# Seeding NumPy here makes the random logits deterministic, so the decoded strings
# asserted in the tests below are stable across runs.
np.random.seed(seed )
return np.random.rand(*shape )
def test_decoder(self ):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
logits = self._get_dummy_logits(shape=(10, 16) , seed=13 )
decoded_processor = processor.decode(logits )
decoded_decoder = decoder.decode_beams(logits )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
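# (pyctcdecode's decode_beams returns (text, last_lm_state, text_frames, logit_score, lm_score)
# tuples, which is why indices [0], [-2] and [-1] above select the text and the two scores.)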
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def test_decoder_batch(self , pool_context ):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
logits = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
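# ("fork" inherits the parent process's memory, so the decoder loaded above is visible to
# the workers for free; "spawn" starts fresh interpreters that must re-import everything.)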
if pool_context is None:
decoded_processor = processor.batch_decode(logits )
else:
with get_context(pool_context ).Pool() as pool:
decoded_processor = processor.batch_decode(logits , pool )
logits_list = list(logits )
with get_context("""fork""" ).Pool() as p:
decoded_beams = decoder.decode_beams_batch(logits_list , p )
texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(texts_decoder , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(logit_scores_decoder , decoded_processor.logit_score )
self.assertListEqual(lm_scores_decoder , decoded_processor.lm_score )
def test_decoder_with_params(self ):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
logits = self._get_dummy_logits()
beam_width = 15
beam_prune_logp = -20.0
token_min_logp = -4.0
decoded_processor_out = processor.batch_decode(
logits , beam_width=beam_width , beam_prune_logp=beam_prune_logp , token_min_logp=token_min_logp , )
decoded_processor_text = decoded_processor_out.text
logits_list = list(logits )
with get_context("""fork""" ).Pool() as pool:
decoded_decoder_out = decoder.decode_beams_batch(
logits_list , pool , beam_width=beam_width , beam_prune_logp=beam_prune_logp , token_min_logp=token_min_logp , )
decoded_decoder_text = [d[0][0] for d in decoded_decoder_out]
logit_scores = [d[0][2] for d in decoded_decoder_out]
lm_scores = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(decoded_decoder_text , decoded_processor_text )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , decoded_decoder_text )
self.assertTrue(np.array_equal(logit_scores , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , logit_scores , atol=1E-3 ) )
self.assertTrue(np.array_equal(lm_scores , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , lm_scores , atol=1E-3 ) )
def test_decoder_with_params_of_lm(self ):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
logits = self._get_dummy_logits()
alpha = 2.0
beta = 5.0
unk_score_offset = -20.0
lm_score_boundary = True
decoded_processor_out = processor.batch_decode(
logits , alpha=alpha , beta=beta , unk_score_offset=unk_score_offset , lm_score_boundary=lm_score_boundary , )
decoded_processor_text = decoded_processor_out.text
logits_list = list(logits )
decoder.reset_params(
alpha=alpha , beta=beta , unk_score_offset=unk_score_offset , lm_score_boundary=lm_score_boundary , )
with get_context("""fork""" ).Pool() as pool:
decoded_decoder_out = decoder.decode_beams_batch(
logits_list , pool , )
decoded_decoder_text = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(decoded_decoder_text , decoded_processor_text )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , decoded_decoder_text )
lm_model = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , True )
def test_decoder_download_ignores_files(self ):
processor = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
language_model = processor.decoder.model_container[processor.decoder._model_key]
path_of_cached_dir = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
downloaded_decoder_files = os.listdir(path_of_cached_dir )
expected_decoder_files = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(downloaded_decoder_files , expected_decoder_files )
def test_decoder_local_files(self ):
local_dir = snapshot_download("""hf-internal-testing/processor_with_lm""" )
processor = WavaVecaProcessorWithLM.from_pretrained(local_dir )
language_model = processor.decoder.model_container[processor.decoder._model_key]
path_of_cached_dir = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
local_decoder_files = os.listdir(local_dir )
expected_decoder_files = os.listdir(path_of_cached_dir )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder from hub and local files in cache are the same
self.assertListEqual(local_decoder_files , expected_decoder_files )
def test_processor_from_auto_processor(self ):
processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
processor_auto = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
raw_speech = floats_list((3, 10_00) )
input_wavaveca = processor_wavaveca(raw_speech , return_tensors="""np""" )
input_auto = processor_auto(raw_speech , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
logits = self._get_dummy_logits()
decoded_wavaveca = processor_wavaveca.batch_decode(logits )
decoded_auto = processor_auto.batch_decode(logits )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def test_model_input_names(self ):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def get_from_offsets(offsets , key ):
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def test_offsets_integration_fast(self ):
processor = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
logits = self._get_dummy_logits()[0]
outputs = processor.decode(logits , output_word_offsets=True )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(outputs , WavaVecaDecoderWithLMOutput ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def test_offsets_integration_fast_batch(self ):
processor = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
logits = self._get_dummy_logits()
outputs = processor.batch_decode(logits , output_word_offsets=True )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(outputs , WavaVecaDecoderWithLMOutput ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(o , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def test_word_time_stamp_integration(self ):
import torch
ds = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=True )
ds = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
ds_iter = iter(ds )
sample = next(ds_iter )
processor = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
model = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
input_values = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
logits = model(input_values ).logits.cpu().numpy()
output = processor.decode(logits[0] , output_word_offsets=True )
time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
word_offsets = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
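# (On the time conversion above: each logit frame spans model.config.inputs_to_logits_ratio
# raw samples -- 320 for the wav2vec2-base feature encoder -- so at a 16 kHz sampling rate
# every frame offset corresponds to 320 / 16000 = 0.02 seconds.)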
EXPECTED_TEXT = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(word_offsets , """word""" ) ) , EXPECTED_TEXT )
self.assertEqual(""" """.join(self.get_from_offsets(word_offsets , """word""" ) ) , output.text )
# output times
start_times = torch.tensor(self.get_from_offsets(word_offsets , """start_time""" ) )
end_times = torch.tensor(self.get_from_offsets(word_offsets , """end_time""" ) )
# fmt: off
expected_start_times = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
expected_end_times = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(start_times , expected_start_times , atol=0.01 ) )
self.assertTrue(torch.allclose(end_times , expected_end_times , atol=0.01 ) )
| 543 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase ):
"""simple docstring"""
def test_input_types(self):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
cset = [[1, 2, 4], [1, 2, 3, 4]]
dc = DisjunctiveConstraint(cset)
self.assertTrue(isinstance(dc.token_ids , list))
with self.assertRaises(ValueError):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(ValueError):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def test_check_illegal_input(self):
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
cset = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(ValueError):
DisjunctiveConstraint(cset) # fails here
def test_example_progression(self):
cset = [[1, 2, 3], [1, 2, 4]]
dc = DisjunctiveConstraint(cset)
stepped , completed , reset = dc.update(1)
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
stepped , completed , reset = dc.update(2)
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
stepped , completed , reset = dc.update(3)
desired = stepped is True and completed is True and reset is False
self.assertTrue(desired)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def test_example_progression_unequal_three_mid_and_reset(self):
cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
dc = DisjunctiveConstraint(cset)
stepped , completed , reset = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
stepped , completed , reset = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
stepped , completed , reset = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
stepped , completed , reset = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
stepped , completed , reset = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
stepped , completed , reset = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
stepped , completed , reset = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
| 155 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int , k: int ) -> list[list[int]]:
result: list[list[int]] = []
create_all_state(1 , n , k , [] , result )
return result
def create_all_state(increment: int , total_number: int , level: int , current_list: list[int] , total_list: list[list[int]] , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(increment , total_number - level + 2 ):
current_list.append(i )
create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
current_list.pop()
def print_all_state(total_list: list[list[int]] ) -> None:
for i in total_list:
print(*i )
if __name__ == "__main__":
n = 4
k = 2
total_list = generate_all_combinations(n, k)
print_all_state(total_list)
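# With n = 4 and k = 2 this backtracking search prints the six 2-element combinations of
# {1, 2, 3, 4}: "1 2", "1 3", "1 4", "2 3", "2 4", "3 4".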
| 155 | 1 |
"""simple docstring"""
from typing import Any
class Node:
def __init__(self , data: Any ):
self.data = data
self.next = None
def __repr__( self : Tuple ):
return F"""Node({self.data})"""
class LinkedList:
def __init__(self ):
self.head = None
def __iter__(self ):
node = self.head
while node:
yield node.data
node = node.next
def __len__( self : Any ):
return sum(1 for _ in self )
def __repr__(self ):
return "->".join([str(item ) for item in self] )
def __getitem__(self , index: int ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__(self , index: int , data: Any ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
current = self.head
for _ in range(index ):
current = current.next
current.data = data
def insert_tail(self , data: Any ):
self.insert_nth(len(self ) , data )
def insert_head(self , data: Any ):
self.insert_nth(0 , data )
def insert_nth(self , index: int , data: Any ):
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
new_node = Node(data )
if self.head is None:
self.head = new_node
elif index == 0:
new_node.next = self.head # link new_node to head
self.head = new_node
else:
temp = self.head
for _ in range(index - 1 ):
temp = temp.next
new_node.next = temp.next
temp.next = new_node
def print_list(self ): # print every node data
print(self )
def delete_head(self ):
return self.delete_nth(0 )
def delete_tail(self ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def delete_nth(self , index: int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
delete_node = self.head # default first node
if index == 0:
self.head = self.head.next
else:
temp = self.head
for _ in range(index - 1 ):
temp = temp.next
delete_node = temp.next
temp.next = temp.next.next
return delete_node.data
def is_empty(self ):
return self.head is None
def reverse(self ): # O(n) time, O(1) extra space: only three node pointers are tracked
prev = None
current = self.head
while current:
# Store the current node's next node.
next_node = current.next
# Make the current node's next point backwards
current.next = prev
# Make the previous node be the current node
prev = current
# Make the current node the next node (to progress iteration)
current = next_node
# Return prev in order to put the head at the end
self.head = prev
def test_singly_linked_list() -> None:
linked_list = LinkedList()
assert linked_list.is_empty() is True
assert str(linked_list ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(linked_list ) == i
linked_list.insert_nth(i , i + 1 )
assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(linked_list ) == 9
assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
linked_list[i] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
test_input = [
-9,
100,
Node(77_345_112 ),
"dlrow olleH",
7,
5_555,
0,
-192.5_5555,
"Hello, world!",
77.9,
Node(10 ),
None,
None,
12.20,
]
linked_list = LinkedList()
for i in test_input:
linked_list.insert_tail(i )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
result = linked_list.delete_head()
assert result == -9
assert (
str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
result = linked_list.delete_tail()
assert result == 12.2
assert (
str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
result = linked_list.delete_nth(10 )
assert result is None
assert (
str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(linked_list )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(None )
assert (
str(linked_list )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(linked_list )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main() -> None:
from doctest import testmod
testmod()
linked_list = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(linked_list )
print("\nReading/changing Node data using indexing:" )
print(F"""Element at Position 1: {linked_list[1]}""" )
linked_list[1] = input("Enter New Value: " ).strip()
print("New list:" )
print(linked_list )
print(F"""length of linked_list is : {len(linked_list )}""" )
if __name__ == "__main__":
main()
| 709 |
"""simple docstring"""
def longest_distance(graph: dict ) -> None:
# Kahn-style topological traversal: repeatedly pop zero-indegree vertices
# and relax every outgoing edge to grow the longest-path estimate.
indegree = [0] * len(graph )
queue = []
long_dist = [1] * len(graph )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(graph ) ):
if indegree[i] == 0:
queue.append(i )
while queue:
vertex = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
long_dist[x] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(x )
print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
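# For this DAG the longest path by vertex count is 0 -> 2 -> 5 -> 6 -> 7, so the call prints 5.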
| 244 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=False , _snake_case=True , _snake_case="None" , _snake_case=3 , _snake_case=4 , _snake_case=None , ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = parent
_UpperCamelCase : int = batch_size
_UpperCamelCase : List[Any] = seq_length
_UpperCamelCase : Optional[Any] = is_training
_UpperCamelCase : List[str] = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Dict = use_labels
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : int = hidden_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Optional[int] = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : int = type_sequence_label_size
_UpperCamelCase : int = initializer_range
_UpperCamelCase : Tuple = num_labels
_UpperCamelCase : Any = num_choices
_UpperCamelCase : List[Any] = relative_attention
_UpperCamelCase : Dict = position_biased_input
_UpperCamelCase : List[Any] = pos_att_type
_UpperCamelCase : List[str] = scope
def prepare_config_and_inputs( self ) -> Union[str, Any]:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def check_loss_output( self , result ) -> List[Any]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
model = DebertaVaModel(config=config )
model.to(torch_device )
model.eval()
sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
sequence_output = model(input_ids )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
model = DebertaVaForMaskedLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
config.num_labels = self.num_labels
model = DebertaVaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(result )
def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
config.num_labels = self.num_labels
model = DebertaVaForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Union[str, Any]:
model = DebertaVaForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_deberta_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
model = DebertaVaForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
# multiple-choice heads expect (batch_size, num_choices, seq_len) inputs, so each flat
# example is replicated once per choice via unsqueeze + expand
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ) -> str:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_torchscript = False
test_pruning = False
test_head_masking = False
is_encoder_decoder = False
def setUp( self ) -> str:
self.model_tester = DebertaVaModelTester(self )
self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
def test_config( self ) -> Tuple:
self.config_tester.run_common_tests()
def test_deberta_model( self ) -> Dict:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*config_and_inputs )
def test_for_sequence_classification( self ) -> Union[str, Any]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
def test_for_masked_lm( self ) -> Tuple:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
def test_for_question_answering( self ) -> Tuple:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
def test_for_token_classification( self ) -> Optional[int]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
def test_for_multiple_choice( self ) -> str:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs )
@slow
def test_model_from_pretrained( self ) -> Optional[int]:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DebertaVaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def test_inference_masked_lm( self ) -> Tuple:
pass
@slow
def test_inference_no_head( self ) -> Any:
model = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 683 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
A__ : str = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
A__ : Union[str, Any] = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('''utf-8''').split()
A__ : Any = '''|'''.join(sys.argv[1:])
A__ : Tuple = re.compile(RF'''^({joined_dirs}).*?\.py$''')
A__ : List[str] = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
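# Note: end="" is deliberate -- as the header says, this output is substituted directly into
# Makefile commands, and a trailing newline would break the command it is embedded in.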
| 171 | 0 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
SAMPLE_BPE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
FRAMEWORK = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = CamembertTokenizer
rust_tokenizer_class = CamembertTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def test_convert_token_and_id( self ):
token = "<pad>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab( self ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(vocab_keys ) , 10_04 )
def test_vocab_size( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def test_rust_and_python_bpe_tokenizers( self ):
tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
sequence = "I was born in 92000, and this is falsé."
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
tokens = tokenizer.convert_ids_to_tokens(ids )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
def test_rust_and_python_full_tokenizers( self ):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
@slow
def test_tokenizer_integration( self ):
expected_encoding = {"input_ids": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
sequences = [
"Le transformeur est un modèle d\'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=sequences , )
| 706 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin ):
"""simple docstring"""
def __init__( self , controlnets ):
super().__init__()
self.nets = nn.ModuleList(controlnets )
def forward( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ):
for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
down_samples , mid_sample = controlnet(
sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
# merge samples
if i == 0:
down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
else:
down_block_res_samples = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
]
mid_block_res_sample += mid_sample
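# ControlNet residuals are additive by design, so the per-net down-block and mid-block
# outputs can simply be summed before being handed to the UNet.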
return down_block_res_samples, mid_block_res_sample
def save_pretrained( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ):
idx = 0
model_path_to_save = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
idx += 1
model_path_to_save = model_path_to_save + f'''_{idx}'''
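# Naming scheme: the first net is saved directly under `save_directory`, then under `_1`,
# `_2`, ... suffixes, so `from_pretrained` below can rediscover the ensemble by probing paths.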
@classmethod
def from_pretrained( cls , pretrained_model_path , **kwargs ):
idx = 0
controlnets = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
model_path_to_load = pretrained_model_path
while os.path.isdir(model_path_to_load ):
controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
controlnets.append(controlnet )
idx += 1
model_path_to_load = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(controlnets )} controlnets loaded from {pretrained_model_path}.''' )
if len(controlnets ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + "_0"}.''' )
return cls(controlnets )
| 420 | 0 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model ):
'''simple docstring'''
original_config = model.config
encoder_config = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
decoder_config = MBartConfig(
is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
return encoder_config, decoder_config
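# (Architecture note: Donut pairs a Swin vision encoder with an MBart text decoder, which is
# why the converted checkpoint is assembled as a VisionEncoderDecoderModel further below.)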
def rename_key(name ):
'''simple docstring'''
if "encoder.model" in name:
name = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
name = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
name = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
name = '''encoder.''' + name
if "attn.proj" in name:
name = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
name = name.replace("attn" , "attention.self" )
if "norm1" in name:
name = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
name = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
name = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
name = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
name = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
name = '''encoder.layernorm.bias'''
return name
def convert_state_dict(orig_state_dict , model ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
# the original checkpoint stores query, key and value as one fused qkv matrix;
# split it into the three separate projections used by the HF implementation
key_split = key.split("." )
layer_num = int(key_split[3] )
block_num = int(key_split[5] )
dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
orig_state_dict[
f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
] = val[:dim, :]
orig_state_dict[
f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
] = val[dim : dim * 2, :]
orig_state_dict[
f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
] = val[-dim:, :]
else:
orig_state_dict[f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''] = val[:dim]
orig_state_dict[f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''] = val[dim : dim * 2]
orig_state_dict[f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
def convert_donut_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
'''simple docstring'''
# load original model
original_model = DonutModel.from_pretrained(model_name ).eval()
# load HuggingFace model
encoder_config , decoder_config = get_configs(original_model )
encoder = DonutSwinModel(encoder_config )
decoder = MBartForCausalLM(decoder_config )
model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
model.eval()
state_dict = original_model.state_dict()
new_state_dict = convert_state_dict(state_dict , model )
model.load_state_dict(new_state_dict )
# verify results on scanned document
dataset = load_dataset("hf-internal-testing/example-documents" )
image = dataset['''test'''][0]['''image'''].convert("RGB" )
tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
image_processor = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
processor = DonutProcessor(image_processor , tokenizer )
pixel_values = processor(image , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
question = '''When is the coffee break?'''
task_prompt = task_prompt.replace("{user_input}" , question )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
task_prompt = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
task_prompt = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
task_prompt = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
task_prompt = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
task_prompt = '''hello world'''
else:
raise ValueError("Model name not supported" )
decoder_input_ids = original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors="pt" )[
'''input_ids'''
]
# sanity-check the port layer by layer: patch embeddings, then encoder states, then logits
original_patch_embed = original_model.encoder.model.patch_embed(pixel_values )
patch_embeddings , _ = model.encoder.embeddings(pixel_values )
assert torch.allclose(original_patch_embed , patch_embeddings , atol=1E-3 )
# verify encoder hidden states
original_last_hidden_state = original_model.encoder(pixel_values )
last_hidden_state = model.encoder(pixel_values ).last_hidden_state
assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1E-2 )
# verify decoder hidden states
original_logits = original_model(pixel_values , decoder_input_ids , None ).logits
logits = model(pixel_values , decoder_input_ids=decoder_input_ids ).logits
assert torch.allclose(original_logits , logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
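# Hedged usage sketch (the converter's file name below is assumed, not taken from this file):
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa-converted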
| 438 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class A :
def __init__( self , SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = parent
A : int = 13
A : Dict = 7
A : str = True
A : Dict = True
A : Tuple = True
A : Union[str, Any] = True
A : Dict = True
A : str = False
A : Union[str, Any] = False
A : Union[str, Any] = False
A : List[str] = 2
A : Optional[int] = 99
A : List[str] = 0
A : int = 32
A : Any = 2
A : Optional[Any] = 4
A : List[Any] = 0.1
A : List[str] = 0.1
A : Tuple = 512
A : Optional[Any] = 16
A : List[str] = 2
A : Tuple = 0.02
A : List[str] = 3
A : List[Any] = 4
A : Any = '''last'''
A : int = True
A : Union[str, Any] = None
A : Dict = 0
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A : Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
A : int = None
if self.use_input_lengths:
A : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A : Tuple = None
if self.use_token_type_ids:
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A : List[Any] = None
A : List[Any] = None
A : Tuple = None
if self.use_labels:
A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A : List[Any] = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
A : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
A : Dict = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : int = TFFlaubertModel(config=SCREAMING_SNAKE_CASE )
A : Optional[int] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
A : int = model(SCREAMING_SNAKE_CASE )
A : List[str] = [input_ids, input_mask]
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : List[str] = TFFlaubertWithLMHeadModel(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
A : Tuple = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE )
A : List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths}
A : Tuple = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : Union[str, Any] = TFFlaubertForSequenceClassification(SCREAMING_SNAKE_CASE )
A : Dict = {'''input_ids''': input_ids, '''lengths''': input_lengths}
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
A : Optional[Any] = self.num_labels
A : str = TFFlaubertForTokenClassification(config=SCREAMING_SNAKE_CASE )
A : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : str = self.num_choices
A : int = TFFlaubertForMultipleChoice(config=SCREAMING_SNAKE_CASE )
A : str = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : Optional[Any] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : str = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : List[str] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A : List[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Tuple = self.prepare_config_and_inputs()
        A , A , A , A , A , A , A , A , A = config_and_inputs
A : List[str] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class A ( __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
__magic_name__ = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__magic_name__ = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : int = TFFlaubertModelTester(self )
A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , emb_dim=37 )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Union[str, Any] = TFFlaubertModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_tf
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Tuple = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
A : List[str] = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
A : int = model(SCREAMING_SNAKE_CASE )[0]
A : str = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
A : Union[str, Any] = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 634 | 0 |
from typing import Any
def lowerCAmelCase ( input_list: list ) -> list[Any]:
    """Return the mode value(s) of ``input_list`` as a sorted list."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count} )
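# Examples (function name kept from this file): lowerCAmelCase([2, 2, 3]) == [2]
# and lowerCAmelCase([2, 2, 3, 3]) == [2, 3]; ties are all returned, sorted.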
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 | '''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def _lowerCAmelCase ( A__: str ):
'''simple docstring'''
UpperCAmelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase = 192
UpperCAmelCase = 768
UpperCAmelCase = 12
UpperCAmelCase = 3
UpperCAmelCase = [800, 1333]
UpperCAmelCase = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase = 330
UpperCAmelCase = 14
UpperCAmelCase = 6
UpperCAmelCase = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase = 384
UpperCAmelCase = 1536
UpperCAmelCase = 12
UpperCAmelCase = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase = [800, 1344]
UpperCAmelCase = 91
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''coco-detection-id2label.json'''
UpperCAmelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(A__ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( A__: dict , A__: YolosConfig , A__: bool = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
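# Note: the timm checkpoint stores Q, K and V as one fused (3 * hidden_size,
# hidden_size) projection; the three hidden_size-row slices above are, in order,
# the query, key and value parameters that the HF model expects as separate tensors.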
def _lowerCAmelCase ( A__: str ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
UpperCAmelCase = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
UpperCAmelCase = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
UpperCAmelCase = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
UpperCAmelCase = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
UpperCAmelCase = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def _lowerCAmelCase ( A__: dict , A__: YolosForObjectDetection ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(A__ )
if "qkv" in key:
UpperCAmelCase = key.split('''.''' )
UpperCAmelCase = int(key_split[2] )
UpperCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase = val[:dim, :]
UpperCAmelCase = val[
dim : dim * 2, :
]
UpperCAmelCase = val[-dim:, :]
else:
UpperCAmelCase = val[:dim]
UpperCAmelCase = val[dim : dim * 2]
UpperCAmelCase = val[-dim:]
else:
UpperCAmelCase = val
return orig_state_dict
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( A__: str , A__: str , A__: str , A__: bool = False ):
'''simple docstring'''
UpperCAmelCase = get_yolos_config(A__ )
# load original state_dict
UpperCAmelCase = torch.load(A__ , map_location='''cpu''' )['''model''']
# load 🤗 model
UpperCAmelCase = YolosForObjectDetection(A__ )
model.eval()
UpperCAmelCase = convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase = 800 if yolos_name != '''yolos_ti''' else 512
UpperCAmelCase = YolosImageProcessor(format='''coco_detection''' , size=A__ )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**A__ )
UpperCAmelCase , UpperCAmelCase = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
UpperCAmelCase = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
UpperCAmelCase = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
UpperCAmelCase = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
UpperCAmelCase = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
UpperCAmelCase = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
UpperCAmelCase = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , A__ , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , A__ , atol=1E-4 )
Path(A__ ).mkdir(exist_ok=A__ )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A__ )
if push_to_hub:
UpperCAmelCase = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
UpperCAmelCase = model_mapping[yolos_name]
image_processor.push_to_hub(A__ , organization='''hustvl''' )
model.push_to_hub(A__ , organization='''hustvl''' )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__magic_name__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 254 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__magic_name__ = logging.get_logger(__name__)
def _lowerCAmelCase ( A__: str=None , A__: List[Any]=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=A__ )
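# Using default_factory (rather than a bare list default) sidesteps dataclasses'
# "mutable default value" error while still letting callers pass a plain list.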
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
__SCREAMING_SNAKE_CASE = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
__SCREAMING_SNAKE_CASE = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Use FP16 to accelerate inference."""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Benchmark training of model"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Verbose memory tracing"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Trace memory line by line"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Save result to a CSV file"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Save all print statements in a log file"""} )
__SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Whether to print environment information"""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , )
__SCREAMING_SNAKE_CASE = field(
default=F'''log_{round(time() )}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
__SCREAMING_SNAKE_CASE = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
__SCREAMING_SNAKE_CASE = field(
default=A__ , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , FutureWarning , )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 254 | 1 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
a_ = 2_0_4_8
a_ = 4_0_9_6
a_ = 4_2
a_ = os.environ.pop('PROCESS_TRAIN', 'false')
a_ = {'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4}
def __UpperCAmelCase ( __UpperCamelCase ):
def choose_first(__UpperCamelCase , __UpperCamelCase=False ):
assert isinstance(__UpperCamelCase , __UpperCamelCase )
if len(__UpperCamelCase ) == 1:
__lowercase : List[str] = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__lowercase : str = {k: [a[k]] for k in a}
if len(a['''start_token'''] ) > 0:
break
return a
__lowercase : Tuple = {'''id''': example['''id''']}
__lowercase : List[str] = example['''annotations''']
__lowercase : List[Any] = annotation['''yes_no_answer''']
if 0 in yes_no_answer or 1 in yes_no_answer:
__lowercase : str = ['''yes'''] if 1 in yes_no_answer else ['''no''']
__lowercase : List[Any] = []
__lowercase : Any = []
__lowercase : int = ['''<cls>''']
else:
__lowercase : int = ['''short''']
__lowercase : Optional[int] = choose_first(annotation['''short_answers'''] )
if len(out['''start_token'''] ) == 0:
# answer will be long if short is not available
__lowercase : Tuple = ['''long''']
__lowercase : str = choose_first(annotation['''long_answer'''] , is_long_answer=__UpperCamelCase )
__lowercase : Optional[int] = []
answer.update(__UpperCamelCase )
# disregard some samples
if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
__lowercase : Union[str, Any] = True
else:
__lowercase : List[str] = False
__lowercase : Optional[int] = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
if not all(isinstance(answer[k] , __UpperCamelCase ) for k in cols ):
raise ValueError('''Issue in ID''' , example['''id'''] )
return answer
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ):
__lowercase : int = _get_single_answer(__UpperCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__lowercase : Optional[Any] = example['''document''']['''tokens''']
__lowercase : Optional[Any] = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(__UpperCamelCase ),
"answer": {
"start_token": -1_00, # ignore index in cross-entropy
"end_token": -1_00, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
    # null marker category, used later to drop all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__lowercase : Optional[Any] = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__lowercase : Optional[Any] = example['''document''']['''tokens''']
__lowercase : Optional[Any] = answer['''start_token''']
__lowercase : Union[str, Any] = answer['''end_token''']
__lowercase : List[str] = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__lowercase : Any = ''' '''.join(context[start_token:end_token] )
# checking above code
if assertion:
__lowercase : Union[str, Any] = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
__lowercase : List[Any] = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
__lowercase : List[str] = ''' '''.join([old[i] for i in range(len(__UpperCamelCase ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , __UpperCamelCase , end='''\n''' )
print('''Old:''' , __UpperCamelCase , end='''\n\n''' )
return {
"context": " ".join(__UpperCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
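# Note the html-aware reindexing above: every html token skipped before the
# answer span shifts start_token/end_token one position left, so the final
# indices point into the cleaned context rather than the raw document tokens.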
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=20_48 , __UpperCamelCase=40_96 , __UpperCamelCase=True ):
# overlap will be of doc_stride - q_len
__lowercase : str = get_context_and_ans(__UpperCamelCase , assertion=__UpperCamelCase )
__lowercase : Optional[Any] = out['''answer''']
    # these no-answer samples are removed later
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__lowercase : int = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
__lowercase : List[Any] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__lowercase : Dict = []
__lowercase : List[str] = []
__lowercase : Union[str, Any] = input_ids[:q_len]
__lowercase : List[Any] = range(__UpperCamelCase , len(__UpperCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
__lowercase : str = i + max_length - q_len
__lowercase : Tuple = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_00] * len(__UpperCamelCase ),
"end_token": [-1_00] * len(__UpperCamelCase ),
"category": category,
},
}
__lowercase : List[str] = out['''context'''].split()
__lowercase : int = splitted_context[answer['''end_token''']]
__lowercase : Optional[int] = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=__UpperCamelCase , ).input_ids )
__lowercase : Optional[Any] = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=__UpperCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__lowercase : int = len(tokenizer(__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__lowercase : List[Any] = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
__lowercase : Dict = answer['''start_token''']
__lowercase : Tuple = answer['''end_token''']
if assertion:
__lowercase : List[str] = tokenizer.decode(__UpperCamelCase )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , __UpperCamelCase , end='''\n\n''' )
if len(__UpperCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__lowercase : str = input_ids[:q_len]
__lowercase : Optional[Any] = range(__UpperCamelCase , len(__UpperCamelCase ) , max_length - doc_stride )
__lowercase : Dict = []
__lowercase : str = []
__lowercase : Any = []
__lowercase : Tuple = [] # null, yes, no, long, short
for i in doc_start_indices:
__lowercase : Any = i + max_length - q_len
__lowercase : str = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__lowercase : Union[str, Any] = start_token - i + q_len
__lowercase : List[str] = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
__lowercase : Optional[Any] = -1_00
__lowercase : Optional[int] = -1_00
answers_category.append('''null''' )
__lowercase : Union[str, Any] = inputs[-1][start_token : end_token + 1]
answers_start_token.append(__UpperCamelCase )
answers_end_token.append(__UpperCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(__UpperCamelCase ) )
print('''Old:''' , tokenizer.decode(__UpperCamelCase ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
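# Window arithmetic for the striding above: chunks start every
# (max_length - doc_stride) tokens and each carries (max_length - q_len) context
# tokens, so consecutive chunks share doc_stride - q_len context tokens -- the
# overlap noted in the comment at the top of this function.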
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=20_48 , __UpperCamelCase=40_96 , __UpperCamelCase=False ):
__lowercase : int = get_strided_contexts_and_ans(
__UpperCamelCase , __UpperCamelCase , doc_stride=__UpperCamelCase , max_length=__UpperCamelCase , assertion=__UpperCamelCase , )
return example
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
with jsonlines.open(__UpperCamelCase , '''a''' ) as writer:
for example in tqdm(__UpperCamelCase , total=len(__UpperCamelCase ) , desc='''Saving samples ... ''' ):
__lowercase : int = example['''labels''']
for ids, start, end, cat in zip(
example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
if start == -1 and end == -1:
                    continue  # skip waste samples that have no answer
if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60 % of the "null" samples
writer.write(
{
'''input_ids''': ids,
'''start_token''': start,
'''end_token''': end,
'''category''': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
a_ = load_dataset('natural_questions')
a_ = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
a_ = data['train' if PROCESS_TRAIN == 'true' else 'validation']
a_ = {
'tokenizer': tokenizer,
'doc_stride': DOC_STRIDE,
'max_length': MAX_LENGTH,
'assertion': False,
}
a_ = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
a_ = data.remove_columns(['annotations', 'document', 'id', 'question'])
print(data)
np.random.seed(SEED)
a_ = 'nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl'
save_to_disk(data, file_name=cache_file_name)
| 709 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Union[str, Any] = 10
def _lowerCamelCase ( self ) -> str:
__lowercase : List[str] = [1, 2, 3, 4]
__lowercase : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__lowercase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> int:
__lowercase : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__lowercase : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : List[Any] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
        __lowercase , __lowercase = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = ''''''
        __lowercase , __lowercase = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[str] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
        __lowercase , __lowercase = process_story(UpperCamelCase_ )
__lowercase : Union[str, Any] = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : List[str] = ['''It was the best of times.''']
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Union[str, Any] = torch.tensor([1, 2, 3, 4] )
__lowercase : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Optional[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowercase : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowercase : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[Any] = 1_01
__lowercase : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
__lowercase : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowercase : Optional[int] = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
| 523 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
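# The _LazyModule indirection defers importing the torch-heavy modeling code
# until an attribute such as SwiftFormerModel is first accessed, which keeps
# `import transformers` cheap even when torch is installed.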
| 400 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase (a_ :List[Any] , a_ :Union[str, Any] , a_ :Tuple , a_ :List[str] , a_ :str=True , a_ :str="pt") -> List[str]:
lowercase :Optional[int] = {'''add_prefix_space''': True} if isinstance(a_ , a_) and not line.startswith(''' ''') else {}
lowercase :Optional[int] = padding_side
return tokenizer(
[line] , max_length=a_ , padding='''max_length''' if pad_to_max_length else None , truncation=a_ , return_tensors=a_ , add_special_tokens=a_ , **a_ , )
def lowerCamelCase (a_ :str , a_ :Tuple , a_ :Optional[Any]=None , ) -> Tuple:
lowercase :Optional[Any] = input_ids.ne(a_).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
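# trim_batch drops every column that is padding in all rows of the batch,
# shrinking sequences to the longest real example before the forward pass.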
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : str="train" , snake_case__ : Optional[Any]=None , snake_case__ : Tuple=None , snake_case__ : Any=None , snake_case__ : Dict="" , ):
'''simple docstring'''
super().__init__()
lowercase :Tuple = Path(snake_case__ ).joinpath(type_path + '''.source''' )
lowercase :Union[str, Any] = Path(snake_case__ ).joinpath(type_path + '''.target''' )
lowercase :List[Any] = self.get_char_lens(self.src_file )
lowercase :Tuple = max_source_length
lowercase :Optional[int] = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
lowercase :Any = tokenizer
lowercase :Tuple = prefix
if n_obs is not None:
lowercase :List[str] = self.src_lens[:n_obs]
lowercase :List[Any] = src_lang
lowercase :str = tgt_lang
def __len__( self : Any ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : str , snake_case__ : Any ):
'''simple docstring'''
lowercase :Optional[int] = index + 1 # linecache starts at 1
lowercase :Optional[Any] = self.prefix + linecache.getline(str(self.src_file ) , snake_case__ ).rstrip('''\n''' )
lowercase :Dict = linecache.getline(str(self.tgt_file ) , snake_case__ ).rstrip('''\n''' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , snake_case__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase :Dict = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer
)
lowercase :Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer
lowercase :Optional[int] = encode_line(snake_case__ , snake_case__ , self.max_source_length , '''right''' )
lowercase :Tuple = encode_line(snake_case__ , snake_case__ , self.max_target_length , '''right''' )
lowercase :List[str] = source_inputs['''input_ids'''].squeeze()
lowercase :Optional[Any] = target_inputs['''input_ids'''].squeeze()
lowercase :List[str] = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __snake_case ( snake_case__ : Optional[int] ):
'''simple docstring'''
return [len(snake_case__ ) for x in Path(snake_case__ ).open().readlines()]
def __snake_case ( self : Tuple , snake_case__ : Union[str, Any] ):
'''simple docstring'''
lowercase :Optional[Any] = torch.stack([x['''input_ids'''] for x in batch] )
lowercase :Tuple = torch.stack([x['''attention_mask'''] for x in batch] )
lowercase :Tuple = torch.stack([x['''decoder_input_ids'''] for x in batch] )
lowercase :str = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , snake_case__ )
else self.tokenizer.pad_token_id
)
lowercase :Optional[int] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , snake_case__ )
else self.tokenizer.pad_token_id
)
lowercase :List[Any] = trim_batch(snake_case__ , snake_case__ )
        lowercase , lowercase = trim_batch(snake_case__ , snake_case__ , attention_mask=snake_case__ )
lowercase :Optional[int] = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
UpperCAmelCase = getLogger(__name__)
def lowerCamelCase (a_ :List[List]) -> Tuple:
return list(itertools.chain.from_iterable(a_))
def lowerCamelCase (a_ :str) -> None:
lowercase :List[str] = get_git_info()
save_json(a_ , os.path.join(a_ , '''git_log.json'''))
def lowerCamelCase (a_ :Optional[int] , a_ :Optional[int] , a_ :Optional[Any]=4 , **a_ :Optional[Any]) -> str:
with open(a_ , '''w''') as f:
json.dump(a_ , a_ , indent=a_ , **a_)
def lowerCamelCase (a_ :Dict) -> Union[str, Any]:
with open(a_) as f:
return json.load(a_)
def lowerCamelCase () -> List[str]:
lowercase :Dict = git.Repo(search_parent_directories=a_)
lowercase :int = {
'''repo_id''': str(a_),
'''repo_sha''': str(repo.head.object.hexsha),
'''repo_branch''': str(repo.active_branch),
'''hostname''': str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase (a_ :Callable , a_ :Iterable) -> List:
return list(map(a_ , a_))
def lowerCamelCase (a_ :Optional[Any] , a_ :str) -> Any:
with open(a_ , '''wb''') as f:
return pickle.dump(a_ , a_)
def lowerCamelCase (a_ :List[str]) -> List[str]:
def remove_articles(a_ :Union[str, Any]):
return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , a_)
def white_space_fix(a_ :Tuple):
return " ".join(text.split())
def remove_punc(a_ :int):
lowercase :List[Any] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(a_ :int):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a_))))
def lowerCamelCase (a_ :List[str] , a_ :Any) -> List[str]:
lowercase :Dict = normalize_answer(a_).split()
lowercase :int = normalize_answer(a_).split()
lowercase :List[Any] = Counter(a_) & Counter(a_)
lowercase :Optional[int] = sum(common.values())
if num_same == 0:
return 0
lowercase :str = 1.0 * num_same / len(a_)
lowercase :Tuple = 1.0 * num_same / len(a_)
lowercase :Tuple = (2 * precision * recall) / (precision + recall)
return fa
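# Worked check (hedged; treating the first argument as the prediction):
# "the cat sat" vs gold "cat sat down" normalizes to ["cat", "sat"] vs
# ["cat", "sat", "down"], so precision = 2/2, recall = 2/3 and F1 = 0.8.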
def lowerCamelCase (a_ :Tuple , a_ :Optional[Any]) -> List[Any]:
return normalize_answer(a_) == normalize_answer(a_)
def lowerCamelCase (a_ :List[str] , a_ :List[str]) -> Dict:
assert len(a_) == len(a_)
lowercase :Any = 0
for hypo, pred in zip(a_ , a_):
em += exact_match_score(a_ , a_)
if len(a_) > 0:
em /= len(a_)
return {"em": em}
def lowerCamelCase (a_ :Union[str, Any]) -> Optional[Any]:
return model_prefix.startswith('''rag''')
def lowerCamelCase (a_ :List[str] , a_ :Tuple , a_ :List[str]) -> Any:
lowercase :List[str] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowercase :str = '''dropout_rate'''
for p in extra_params:
if getattr(a_ , a_ , a_):
if not hasattr(a_ , a_) and not hasattr(a_ , equivalent_param[p]):
logger.info('''config doesn\'t have a `{}` attribute'''.format(a_))
delattr(a_ , a_)
continue
lowercase :List[str] = p if hasattr(a_ , a_) else equivalent_param[p]
setattr(a_ , a_ , getattr(a_ , a_))
delattr(a_ , a_)
return hparams, config
| 677 | 0 |
"""simple docstring"""
def __magic_name__ ( min_total: int = 1_0**1_2 ):
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
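# Context (hedged): this matches Project Euler #100 -- the first box of more than
# 10**12 discs where P(two random discs are both blue) is exactly 1/2. Valid
# (blue, total) pairs satisfy the negative Pell equation x**2 - 2*y**2 = -1 with
# x = 2*total - 1 and y = 2*blue - 1; the coupled "+= 2 *" updates enumerate its
# solutions, with numerator tracking 2*total - 1 and denominator 2*blue - 1.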
if __name__ == "__main__":
print(f'{solution() = }')
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowercase__ = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
def counting_sort ( collection ):
    '''Stable counting sort; returns a new sorted list.'''
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return "".join([chr(lowerCamelCase ) for i in counting_sort([ord(lowerCamelCase ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
__UpperCamelCase : str = input("""Enter numbers separated by a comma:\n""").strip()
__UpperCamelCase : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
| 80 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
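# A minimal usage sketch (assumed agents-style API; `waveform` is a raw audio
# array, e.g. loaded with librosa or the datasets library):
#
#   tool = SpeechToTextTool()
#   transcript = tool(waveform)  # runs encode -> forward -> decode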
| 598 | 0 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Return 1 if `number` has an even count of prime factors, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
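# Worked examples (prime factors counted with multiplicity):
#   liouville_lambda(10)  # 10 = 2 * 5, two factors     -> returns 1
#   liouville_lambda(8)   # 8 = 2 * 2 * 2, three factors -> returns -1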
| 711 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 512 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(self, prefix_length, prefix_inner_dim, prefix_hidden_dim=None, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_new", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, ):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal." )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(self, input_ids=None, input_embeds=None, device=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id=None, ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
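# A minimal usage sketch (dimensions and token ids are illustrative, not taken
# from this file; with `prefix_hidden_dim=None` the prefix encoder/decoder are
# identity maps):
#
#   decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#   features = torch.randn(2, 77, 768)  # e.g. CLIP text embeddings
#   tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")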
| 339 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices i < j with nums[i] + nums[j] == target.

    Assumes `nums` is sorted in ascending order (the two-pointer invariant)."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{two_pointer([2, 7, 11, 15], 9) = }")
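# Note: the two-pointer scan relies on `nums` being sorted ascending. For
# unsorted data, sort first and translate indices back, e.g.:
#   order = sorted(range(len(nums)), key=nums.__getitem__)
#   i, j = two_pointer([nums[k] for k in order], target)  # originals: order[i], order[j]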
| 339 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, activation_dropout=0.0, attention_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
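# A minimal export sketch (assumed usage; the checkpoint name is illustrative,
# and `TensorType` is already imported above):
#
#   from transformers import AutoTokenizer
#   onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
#   tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)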
| 4 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs, ):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
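# A minimal composition sketch (defaults shown; an OPT text backbone is used
# when `text_config` is omitted):
#
#   config = InstructBlipConfig()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(
#       InstructBlipVisionConfig(), InstructBlipQFormerConfig(), config.text_config
#   )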
| 4 | 1 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program." )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 65 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    # This section tests the list with varying data types as input.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 214 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 266 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterable
from typing import Any
class a :
"""simple docstring"""
def __init__( self : Any , snake_case : int | None = None ) -> int:
__UpperCAmelCase : str = value
__UpperCAmelCase : Node | None = None # Added in order to delete a node easier
__UpperCAmelCase : Node | None = None
__UpperCAmelCase : Node | None = None
def __repr__( self : str ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'{self.value}': (self.left, self.right)} , indent=1 )
class a :
"""simple docstring"""
def __init__( self : Optional[int] , snake_case : Node | None = None ) -> str:
__UpperCAmelCase : Optional[Any] = root
def __str__( self : str ) -> str:
return str(self.root )
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : Node , snake_case : Node | None ) -> None:
if new_children is not None: # reset its kids
__UpperCAmelCase : List[str] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(snake_case ): # If it is the right children
__UpperCAmelCase : int = new_children
else:
__UpperCAmelCase : Tuple = new_children
else:
__UpperCAmelCase : List[Any] = new_children
def lowerCamelCase__ ( self : Optional[int] , snake_case : Node ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowerCamelCase__ ( self : int ) -> bool:
return self.root is None
def lowerCamelCase__ ( self : Optional[int] , snake_case : Optional[Any] ) -> None:
__UpperCAmelCase : int = Node(snake_case ) # create a new Node
if self.empty(): # if Tree is empty
__UpperCAmelCase : List[Any] = new_node # set its root
else: # Tree is not empty
__UpperCAmelCase : Tuple = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
__UpperCAmelCase : Optional[int] = new_node # We insert the new node in a leaf
break
else:
__UpperCAmelCase : List[Any] = parent_node.left
else:
if parent_node.right is None:
__UpperCAmelCase : Optional[int] = new_node
break
else:
__UpperCAmelCase : List[str] = parent_node.right
__UpperCAmelCase : int = parent_node
def lowerCamelCase__ ( self : Optional[int] , *snake_case : List[Any] ) -> None:
for value in values:
self.__insert(snake_case )
def lowerCamelCase__ ( self : Optional[int] , snake_case : Dict ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
__UpperCAmelCase : List[Any] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
__UpperCAmelCase : Union[str, Any] = node.left if value < node.value else node.right
return node
def lowerCamelCase__ ( self : str , snake_case : Node | None = None ) -> Node | None:
if node is None:
if self.root is None:
return None
__UpperCAmelCase : Optional[Any] = self.root
if not self.empty():
while node.right is not None:
__UpperCAmelCase : str = node.right
return node
def lowerCamelCase__ ( self : int , snake_case : Node | None = None ) -> Node | None:
if node is None:
__UpperCAmelCase : str = self.root
if self.root is None:
return None
if not self.empty():
__UpperCAmelCase : List[str] = self.root
while node.left is not None:
__UpperCAmelCase : str = node.left
return node
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int ) -> None:
__UpperCAmelCase : List[str] = self.search(snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(snake_case , snake_case )
elif node.left is None: # Has only right children
self.__reassign_nodes(snake_case , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(snake_case , node.left )
else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
def lowerCamelCase__ ( self : List[str] , snake_case : Node | None ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowerCamelCase__ ( self : str , snake_case : list , snake_case : Node | None ) -> None:
if node:
self.inorder(snake_case , node.left )
arr.append(node.value )
self.inorder(snake_case , node.right )
def lowerCamelCase__ ( self : Optional[int] , snake_case : int , snake_case : Node ) -> int:
__UpperCAmelCase : list[int] = []
self.inorder(snake_case , snake_case ) # append all values to list using inorder traversal
return arr[k - 1]
def postorder(curr_node : Node | None ):
    '''simple docstring'''
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
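# A minimal standalone sketch of the search loop used above (function and
# variable names here are illustrative, not part of the class): the
# short-circuit `node is not None and ...` guard is what prevents a
# NoneType attribute error when a value is absent from the tree.
def sketch_search(root, value):
    node = root
    while node is not None and node.value != value:
        node = node.left if value < node.value else node.right
    return node  # None when the value is not in the tree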
def _a ( ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
__UpperCAmelCase : Union[str, Any] = BinarySearchTree()
for i in testlist:
t.insert(_lowercase )
# Prints all the elements of the list in order traversal
print(_lowercase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_lowercase )
print(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 266 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCamelCase ( lowerCAmelCase__ : Dict[str, torch.Tensor] ):
__a : List[Any] = []
__a : str = []
__a : Tuple = []
for rt in rc.restypes:
__a : str = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
__a : Tuple = {name: i for i, name in enumerate(lowerCAmelCase__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 1_4 )
restype_atomaa_to_atomaa_list.append([0] * 3_7 )
restype_atomaa_mask_list.append([0.0] * 1_4 )
    __a : Tuple = torch.tensor(
        lowerCAmelCase__ , dtype=torch.int32 , device=protein['''aatype'''].device , )
    __a : Dict = torch.tensor(
        lowerCAmelCase__ , dtype=torch.int32 , device=protein['''aatype'''].device , )
    __a : Optional[int] = torch.tensor(
        lowerCAmelCase__ , dtype=torch.float32 , device=protein['''aatype'''].device , )
__a : Any = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
__a : int = restype_atomaa_to_atomaa[protein_aatype]
__a : Any = restype_atomaa_mask[protein_aatype]
__a : Any = residx_atomaa_mask
__a : Optional[Any] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
__a : Optional[int] = restype_atomaa_to_atomaa[protein_aatype]
__a : Any = residx_atomaa_to_atomaa.long()
# create the corresponding mask
    __a : int = torch.zeros([2_1, 3_7] , dtype=torch.float32 , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
__a : str = rc.restype_atoa[restype_letter]
__a : Any = rc.residue_atoms[restype_name]
for atom_name in atom_names:
__a : Optional[Any] = rc.atom_order[atom_name]
__a : str = 1
__a : str = restype_atomaa_mask[protein_aatype]
__a : List[str] = residx_atomaa_mask
return protein
def __UpperCamelCase ( lowerCAmelCase__ : Dict[str, torch.Tensor] ):
__a : List[Any] = tree_map(lambda lowerCAmelCase__ : torch.tensor(lowerCAmelCase__ , device=batch['''aatype'''].device ) , lowerCAmelCase__ , np.ndarray )
__a : Optional[int] = tensor_tree_map(lambda lowerCAmelCase__ : np.array(lowerCAmelCase__ ) , make_atomaa_masks(lowerCAmelCase__ ) )
return out
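# Illustrative sketch (not part of the transform above): the heart of the
# atom14<->atom37 bookkeeping is a per-residue gather, where indexing a
# [num_restypes, num_atoms] lookup table with the aatype vector yields one
# index row per residue. Names below (toy_lookup, toy_aatype) are made up.
import torch
toy_lookup = torch.tensor([[0, 1, 2], [3, 4, 0]])  # toy per-restype index table
toy_aatype = torch.tensor([1, 0, 1])               # toy residue-type ids
per_residue = toy_lookup[toy_aatype]               # shape [3, 3], one row per residue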
| 521 |
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)
def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
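# Quick sanity check: the smallest number evenly divisible by every integer
# from 1 to 10 is 2520 (the worked example from the Project Euler statement),
# and the helpers above reproduce it.
assert solution(10) == 2520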
if __name__ == "__main__":
print(F"""{solution() = }""")
| 521 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowercase = '''docs/source/en/_toctree.yml'''
def snake_case__ ( _A: List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase = defaultdict(_A )
lowerCAmelCase = []
lowerCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(_A )
lowerCAmelCase = new_doc_list
lowerCAmelCase = [key for key, value in counts.items() if value > 1]
lowerCAmelCase = []
for duplicate_key in duplicates:
lowerCAmelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(_A ) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
    lowerCAmelCase = sorted(_A , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_A ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(_A )
# Sort
return overview_doc
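# Self-contained sketch of the dedup idea used above (toy data, not the real
# helper): keying the docs by their "local" field collapses duplicate entries
# while preserving one representative dict per key.
toy_docs = [{"local": "ddim", "title": "DDIM"}, {"local": "ddim", "title": "DDIM"}]
deduped = list({d["local"]: d for d in toy_docs}.values())  # -> one DDIM entry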
def snake_case__ ( _A: str=False ) -> Optional[int]:
'''simple docstring'''
with open(_A , encoding="""utf-8""" ) as f:
lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase = content[api_idx]["""sections"""]
# Then to the model doc
lowerCAmelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowerCAmelCase = api_doc[scheduler_idx]["""sections"""]
lowerCAmelCase = clean_doc_toc(_A )
lowerCAmelCase = False
if new_scheduler_doc != scheduler_doc:
lowerCAmelCase = True
if overwrite:
lowerCAmelCase = new_scheduler_doc
if diff:
if overwrite:
lowerCAmelCase = api_doc
with open(_A , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(_A , allow_unicode=_A ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def snake_case__ ( _A: Optional[int]=False ) -> Union[str, Any]:
'''simple docstring'''
with open(_A , encoding="""utf-8""" ) as f:
lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase = content[api_idx]["""sections"""]
# Then to the model doc
lowerCAmelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowerCAmelCase = False
lowerCAmelCase = api_doc[pipeline_idx]["""sections"""]
lowerCAmelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowerCAmelCase = pipeline_doc["""section"""]
lowerCAmelCase = clean_doc_toc(_A )
if overwrite:
lowerCAmelCase = new_sub_pipeline_doc
new_pipeline_docs.append(_A )
# sort overall pipeline doc
lowerCAmelCase = clean_doc_toc(_A )
if new_pipeline_docs != pipeline_docs:
lowerCAmelCase = True
if overwrite:
lowerCAmelCase = new_pipeline_docs
if diff:
if overwrite:
lowerCAmelCase = api_doc
with open(_A , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(_A , allow_unicode=_A ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__lowercase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 605 | '''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def snake_case__ ( _A: Dict ) -> str:
'''simple docstring'''
lowerCAmelCase = []
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
def snake_case__ ( _A: Dict , _A: Any ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = []
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def snake_case__ ( _A: Dict ) -> Dict:
'''simple docstring'''
lowerCAmelCase = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token""") )
return token
def snake_case__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
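# Hedged sketch of how (new_name, old_name) pairs like the ones built above are
# typically applied to a checkpoint (toy helper, not the real CvT conversion):
def sketch_apply_renames(old_sd, rename_pairs):
    # Build the HF-style state dict by pulling each tensor from its old key.
    return {new_key: old_sd[old_key] for new_key, old_key in rename_pairs}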
def snake_case__ ( _A: Union[str, Any] , _A: str , _A: List[Any] , _A: List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = """imagenet-1k-id2label.json"""
lowerCAmelCase = 1000
lowerCAmelCase = """huggingface/label-files"""
lowerCAmelCase = num_labels
lowerCAmelCase = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type="""dataset""" ) ) , """r""" ) )
lowerCAmelCase = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
lowerCAmelCase = lowerCAmelCase = CvtConfig(num_labels=_A , idalabel=_A , labelaid=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
lowerCAmelCase = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
lowerCAmelCase = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowerCAmelCase = [2, 2, 20]
lowerCAmelCase = [3, 12, 16]
lowerCAmelCase = [192, 768, 1024]
lowerCAmelCase = CvtForImageClassification(_A )
lowerCAmelCase = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
lowerCAmelCase = image_size
lowerCAmelCase = torch.load(_A , map_location=torch.device("""cpu""" ) )
lowerCAmelCase = OrderedDict()
lowerCAmelCase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowerCAmelCase = list_of_state_dict + cls_token(_A )
lowerCAmelCase = list_of_state_dict + embeddings(_A )
for cnt in range(config.depth[idx] ):
lowerCAmelCase = list_of_state_dict + attention(_A , _A )
lowerCAmelCase = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_A )
for i in range(len(_A ) ):
lowerCAmelCase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowercase = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 605 | 1 |
import math
def main() -> None:
    message = input('Enter message: ')
    key = int(input(f'Enter key [2-{len(message) - 1}]: '))
    mode = input('Encryption/Decryption [e/d]: ')
    if mode.lower().startswith('e'):
        text = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    cipher_text = [''] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return ''.join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return ''.join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 97 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')
    print(f'Save vocab file to {pytorch_vocab_dump_path}')
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
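# Toy illustration of the re-nesting above (made-up keys): everything except the
# prediction layer gains a "transformer." prefix, placing the base model one
# level deeper than in the original XLM dump.
_toy = {'pred_layer.proj.weight': 1, 'attentions.0.q_lin.weight': 2}
_nested = {k if 'pred_layer' in k else 'transformer.' + k: v for k, v in _toy.items()}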
if __name__ == "__main__":
lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase :str = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path) | 561 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCamelCase( TokenizerTesterMixin , unittest.TestCase ):
__A: Optional[Any] = CTRLTokenizer
__A: int = False
__A: List[Any] = False
def a__ ( self : Tuple ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase : Tuple = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
_UpperCAmelCase : Dict = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
_UpperCAmelCase : Dict = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
_UpperCAmelCase : Tuple = {"unk_token": "<unk>"}
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowerCamelCase ) )
def a__ ( self : Tuple , **_lowerCamelCase : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a__ ( self : Tuple , _lowerCamelCase : List[Any] ):
_UpperCAmelCase : str = "adapt react readapt apt"
_UpperCAmelCase : Union[str, Any] = "adapt react readapt apt"
return input_text, output_text
def a__ ( self : Union[str, Any] ):
_UpperCAmelCase : Dict = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase : Dict = "adapt react readapt apt"
_UpperCAmelCase : Tuple = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
_UpperCAmelCase : str = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : Optional[Any] = tokens + [tokenizer.unk_token]
_UpperCAmelCase : Tuple = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
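# Sketch of how a merges list like the toy one above is usually turned into the
# pair-rank table that drives BPE merging (standard bookkeeping, not a detail
# specific to the real CTRLTokenizer internals):
toy_merges = ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"]
toy_bpe_ranks = {tuple(m.split()): i for i, m in enumerate(toy_merges)}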
| 713 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _UpperCamelCase:
__A: Optional[int] = LEDConfig
__A: Dict = {}
__A: List[str] = """gelu"""
def __init__( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=13 , _lowerCamelCase : Any=7 , _lowerCamelCase : List[str]=True , _lowerCamelCase : Dict=False , _lowerCamelCase : List[Any]=99 , _lowerCamelCase : str=32 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Optional[Any]=4 , _lowerCamelCase : Dict=37 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : int=0.1 , _lowerCamelCase : Optional[Any]=20 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Union[str, Any]=1 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : List[str]=4 , ):
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : Optional[Any] = batch_size
_UpperCAmelCase : Optional[Any] = seq_length
_UpperCAmelCase : List[Any] = is_training
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : int = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : List[str] = eos_token_id
_UpperCAmelCase : Optional[Any] = pad_token_id
_UpperCAmelCase : int = bos_token_id
_UpperCAmelCase : Optional[int] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCAmelCase : str = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCAmelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def a__ ( self : Union[str, Any] ):
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Optional[int] = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
_UpperCAmelCase : Union[str, Any] = prepare_led_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : Any = tf.concat(
[tf.zeros_like(_lowerCamelCase )[:, :-1], tf.ones_like(_lowerCamelCase )[:, -1:]] , axis=-1 , )
_UpperCAmelCase : int = global_attention_mask
return config, inputs_dict
def a__ ( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] ):
_UpperCAmelCase : Any = TFLEDModel(config=_lowerCamelCase ).get_decoder()
_UpperCAmelCase : List[str] = inputs_dict["input_ids"]
_UpperCAmelCase : List[Any] = input_ids[:1, :]
_UpperCAmelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCAmelCase : Optional[Any] = 1
# first forward pass
_UpperCAmelCase : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _UpperCAmelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_UpperCAmelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCAmelCase : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCAmelCase : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
_UpperCAmelCase : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCAmelCase : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1E-3 )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> Any:
"""simple docstring"""
if attention_mask is None:
        _UpperCAmelCase : List[str] = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
_UpperCAmelCase : Optional[Any] = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
_UpperCAmelCase : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
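# Quick illustration of the mask construction above (toy ids; the pad id is
# assumed to be 1 here):
import tensorflow as tf
toy_ids = tf.constant([[5, 7, 1, 1]])
toy_mask = tf.cast(tf.math.not_equal(toy_ids, 1), tf.int8)  # -> [[1, 1, 0, 0]]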
@require_tf
class _UpperCamelCase( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__A: List[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__A: Dict = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__A: Optional[int] = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A: Union[str, Any] = True
__A: Tuple = False
__A: List[str] = False
__A: Dict = False
def a__ ( self : Optional[Any] ):
_UpperCAmelCase : Dict = TFLEDModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=_lowerCamelCase )
def a__ ( self : Any ):
self.config_tester.run_common_tests()
def a__ ( self : int ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase )
def a__ ( self : List[str] ):
_UpperCAmelCase ,_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Any = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCAmelCase : int = 2
_UpperCAmelCase : Tuple = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCAmelCase : Any = True
_UpperCAmelCase : List[Any] = self.model_tester.seq_length
_UpperCAmelCase : Optional[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_lowerCamelCase : str ):
_UpperCAmelCase : int = outputs.decoder_attentions
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_lowerCamelCase : Any ):
_UpperCAmelCase : Any = [t.numpy() for t in outputs.encoder_attentions]
_UpperCAmelCase : List[str] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCAmelCase : int = True
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Optional[int] = model_class(_lowerCamelCase )
_UpperCAmelCase : Any = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
_UpperCAmelCase : Optional[int] = len(_lowerCamelCase )
self.assertEqual(config.output_hidden_states , _lowerCamelCase )
check_encoder_attentions_output(_lowerCamelCase )
if self.is_encoder_decoder:
_UpperCAmelCase : Dict = model_class(_lowerCamelCase )
_UpperCAmelCase : Optional[Any] = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , _lowerCamelCase )
check_decoder_attentions_output(_lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCAmelCase : Any = True
_UpperCAmelCase : Any = model_class(_lowerCamelCase )
_UpperCAmelCase : Any = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , _lowerCamelCase )
check_encoder_attentions_output(_lowerCamelCase )
# Check attention is always last and order is fine
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : int = True
_UpperCAmelCase : List[str] = model_class(_lowerCamelCase )
_UpperCAmelCase : Optional[Any] = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , _lowerCamelCase )
check_encoder_attentions_output(_lowerCamelCase )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def a__ ( self : List[Any] ):
pass
def a__ ( self : Tuple ):
# TODO: Head-masking not yet implement
pass
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
    return tf.constant(_SCREAMING_SNAKE_CASE , dtype=tf.int32 )
__lowerCamelCase = 1e-4
@slow
@require_tf
class _UpperCamelCase( unittest.TestCase ):
def a__ ( self : Any ):
_UpperCAmelCase : Tuple = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCAmelCase : int = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_UpperCAmelCase : Optional[int] = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_UpperCAmelCase : Optional[Any] = prepare_led_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : Tuple = model(**_lowerCamelCase )[0]
_UpperCAmelCase : List[Any] = (1, 10_24, 7_68)
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
_UpperCAmelCase : Dict = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1E-3 )
def a__ ( self : List[str] ):
_UpperCAmelCase : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCAmelCase : Tuple = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_UpperCAmelCase : List[Any] = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_UpperCAmelCase : Any = prepare_led_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : Union[str, Any] = model(**_lowerCamelCase )[0]
_UpperCAmelCase : List[Any] = (1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
_UpperCAmelCase : int = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1E-3 , rtol=1E-3 )
| 328 | 0 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """simple docstring"""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """simple docstring"""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """simple docstring"""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """simple docstring"""
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width
def area_square(side_length: float) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    """simple docstring"""
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """simple docstring"""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def area_parallelogram(base: float, height: float) -> float:
    """simple docstring"""
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """simple docstring"""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    """simple docstring"""
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """simple docstring"""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    """simple docstring"""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
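# Cross-check on a known 3-4-5 right triangle: Heron's formula above agrees
# with the base*height/2 formula (both give 6.0 exactly).
assert area_triangle_three_sides(3, 4, 5) == area_triangle(3, 4)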
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 27 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 137 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
super().__init__(*snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = {}
def snake_case ( self ,snake_case__ ,*snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = super().add_tokens(snake_case__ ,*snake_case__ ,**snake_case__ )
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
' `placeholder_token` that is not already in the tokenizer.' )
def snake_case ( self ,snake_case__ ,*snake_case__ ,snake_case__=1 ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case__ ,*snake_case__ ,**snake_case__ )
output.append(snake_case__ )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = []
for i in range(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = placeholder_token + F'_{i}'
self.try_adding_tokens(snake_case__ ,*snake_case__ ,**snake_case__ )
output.append(snake_case__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
                    F' {placeholder_token}; keep placeholder tokens independent' )
SCREAMING_SNAKE_CASE_ : str = output
def snake_case ( self ,snake_case__ ,snake_case__=False ,snake_case__=1.0 ):
if isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = []
for i in range(len(snake_case__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] ,vector_shuffle=snake_case__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
SCREAMING_SNAKE_CASE_ : List[str] = self.token_map[placeholder_token]
SCREAMING_SNAKE_CASE_ : Tuple = tokens[: 1 + int(len(snake_case__ ) * prop_tokens_to_load )]
if vector_shuffle:
SCREAMING_SNAKE_CASE_ : Tuple = copy.copy(snake_case__ )
random.shuffle(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace(snake_case__ ,' '.join(snake_case__ ) )
return text
def __call__( self ,snake_case__ ,*snake_case__ ,snake_case__=False ,snake_case__=1.0 ,**snake_case__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case__ ,vector_shuffle=snake_case__ ,prop_tokens_to_load=snake_case__ ) ,*snake_case__ ,**snake_case__ ,)
def snake_case ( self ,snake_case__ ,*snake_case__ ,snake_case__=False ,snake_case__=1.0 ,**snake_case__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case__ ,vector_shuffle=snake_case__ ,prop_tokens_to_load=snake_case__ ) ,*snake_case__ ,**snake_case__ ,)
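# Hedged usage sketch (commented out; assumes a CLIP checkpoint is available and
# uses the class/method names from the upstream textual-inversion example, which
# the obfuscated names above correspond to):
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32')
# tokenizer.add_placeholder_tokens('<cat-toy>', num_vec_per_token=4)
# tokenizer('a photo of <cat-toy>', vector_shuffle=True)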
| 685 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 685 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
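# Quick numeric sketch of the clipping behaviour exercised above (not part of
# the test suite): "gelu_10" matches GELU on small inputs but saturates at 10,
# so get_activation("gelu_10")(torch.tensor([100.0])) evaluates to tensor([10.]).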
| 610 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
_lowercase_uppercase_re = re.compile(r"""([a-z\d])([A-Z])""")

_single_underscore_re = re.compile(r"""(?<!_)_(?!_)""")
_multiple_underscores_re = re.compile(r"""(_{2,})""")

_split_re = r"""^\w+(\.\w+)*$"""

INVALID_WINDOWS_CHARACTERS = r"""<>:/\|?*"""
def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
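# Illustrative behaviour of the helpers above (derived from the regexes, not
# taken from a test suite):
# camelcase_to_snakecase("SomeDatasetName")   -> "some_dataset_name"
# snakecase_to_camelcase("some_dataset_name") -> "SomeDatasetName"
# filename_prefix_for_split("SQuAD", "train") -> "s_qu_ad-train"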
| 610 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Wraps an M-CTC-T feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
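# Hedged usage sketch; "speechbrain/m-ctc-t-large" is the public M-CTC-T
# checkpoint, and `audio` is assumed to be a 1-D float array of raw samples:
# processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
# inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")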
| 203 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 203 | 1 |
def odd_even_sort(input_list):
    """Sort a list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
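# Quick sanity check (a sketch, separate from the interactive entry point):
# odd_even_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5]. Like bubble sort the
# algorithm is O(n^2) overall, but the comparisons inside each even or odd pass
# are independent of one another, which makes the sort easy to parallelise.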
| 276 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow C++ console output
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 276 | 1 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
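# Worked check on a small grid: a 5 x 5 spiral has diagonal values
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, and solution(5) == 101.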
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 689 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """Collects the hyper-parameters and inputs used by the TFFlaubert tests."""

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id)
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ], dtype=tf.float32)
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 689 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
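# Each Chudnovsky term adds roughly 14 correct digits, which is why the loop
# above runs ceil(precision / 14) times; the trailing digit of the quotient is
# dropped as a rounding guard.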
if __name__ == "__main__":
    n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
| 170 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
# Real output values provided.
        self.output_array = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        """Run a forward pass and return the output layer activations."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        """Propagate the error backwards and update all three weight matrices."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        """Predict the binary output for a single input vector."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid squashing function."""
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, written in terms of the sigmoid's output."""
    return (value) * (1 - (value))
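# Note: since s'(x) = s(x) * (1 - s(x)), `sigmoid_derivative` expects the
# *activation* s(x) as its argument rather than the raw pre-activation x,
# which is why back_propagation feeds it the stored layer outputs.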
def example() -> int:
    """Train on the eight 3-bit inputs and predict the output for (1, 1, 1)."""
    test_input = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ), dtype=numpy.float64)
# True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
# Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example() | 504 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer, based on a SentencePiece BPE model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 711 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" ALBERT tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
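# Hedged usage sketch with one of the checkpoints listed above:
# tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
# enc = tokenizer("Hello world")
# enc["input_ids"] starts with the [CLS] id and ends with the [SEP] id,
# exactly as build_inputs_with_special_tokens constructs single sequences.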
| 200 | 0 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """If DATASETS_VERBOSITY is set to a valid choice, return it; otherwise the default."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }")
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)
def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)
# The four helpers below pin the verbosity to one concrete level each; the levels were
# dropped by the obfuscation and are reconstructed here from the imports above.
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_propagation() -> None:
    """Disable propagation of the library's log outputs to ancestor loggers."""
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    """Enable propagation of the library's log outputs to ancestor loggers."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
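# --- Usage sketch (illustrative) ---
# logger = get_logger(__name__)
# set_verbosity(INFO)            # or set_verbosity_error() to silence most output
# logger.info("datasets logging configured")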
class EmptyTqdm:
    """Dummy tqdm that does nothing, used when progress bars are disabled."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        """Return an empty function that silently swallows any call."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
| 200 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class VanConfig(PretrainedConfig):
    """Configuration class for the Visual Attention Network (VAN) model."""
    model_type = "van"
    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
                 hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4],
                 hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2,
                 drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
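# --- Usage sketch (illustrative) ---
# config = VanConfig(depths=[2, 2, 4, 2])        # override any default above
# assert config.hidden_sizes == [64, 128, 320, 512]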
| 659 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
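# e.g. format_time(3661) -> "1:01:01" and format_time(61) -> "01:01" (hours omitted when zero)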
def html_progress_bar(value, total, prefix, label, width=300):
    "HTML snippet for a progress bar of `value` out of `total`."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
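# e.g. text_to_html_table([["Step", "Training Loss"], [10, 0.523411]]) yields a two-row HTML table;
# floats are rendered with six decimal places.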
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2
    def __init__(self, total: int, prefix: Optional[str] = None, leave: bool = True,
                 parent: Optional["NotebookTrainingTracker"] = None, width: int = 300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        """The main method to update the progress bar to `value`."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
    def close(self):
        "Close the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    "An object tracking the updates of an ongoing training with progress bars and a table of metrics."
    def __init__(self, num_total_training_steps, column_names=None):
        super().__init__(num_total_training_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])
    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar
    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    "A `TrainerCallback` that displays the progress of training or evaluation in a Jupyter notebook."
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False
    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)
    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
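# --- Usage sketch (illustrative; only renders inside a Jupyter notebook) ---
# bar = NotebookProgressBar(100)
# for i in range(100):
#     bar.update(i + 1)  # re-renders the HTML <progress> element as work advances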
| 184 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)
                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)
                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)
            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)
            eval(**tokens).block_until_ready()
    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)
            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)
            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")
    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 184 | 1 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original GELU: x * Phi(x), computed with the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Gaussian Error Linear Unit, tanh approximation (https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
    """GELU with the output clipped to [-10, 10] to avoid fp16 overflow."""
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: splits `x` in two halves along `axis` and gates one with the sigmoid of the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
'gelu': gelu,
    'gelu_10': gelu_10,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
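# --- Usage sketch (illustrative) for the lookup helper defined just below ---
# act = get_tf_activation("gelu_fast")
# y = act(tf.constant([-1.0, 0.0, 1.0]))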
def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
| 522 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
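# Typical invocation (illustrative; flag names assume the stock TensorFlow benchmark arguments):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128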
| 258 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        # If instances were reused, `act2` would carry over the attribute set on `act1`.
        with self.assertRaises(AttributeError):
            _ = act2.a
| 709 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor gets cast back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")
        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
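# --- Usage sketch (illustrative; assumes hub access) ---
# unmasker = pipeline(task="fill-mask", model="distilroberta-base")
# unmasker("The capital of France is <mask>.", top_k=2)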
| 548 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running a Markov chain simulation."""
    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}
    def add_node(self, node: str) -> None:
        self.connections[node] = {}
    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability
    def get_nodes(self) -> list[str]:
        return list(self.connections)
    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the chain for `steps` transitions starting from `start` and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
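# --- Usage sketch (illustrative) ---
# transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# counts = get_transitions("a", transitions, 5000)
# counts["a"] > counts["b"]  # -> True with high probability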
| 13 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pretrain."""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Data collator that dynamically pads the received inputs and computes the time-mask indices."""
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    """Subclassed `Trainer` that decays the gumbel softmax temperature at every update step."""
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )
    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )
    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
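# Typical launch (illustrative; model/dataset identifiers are placeholders, flags follow the
# dataclasses defined above plus the standard TrainingArguments):
#   python run_pretrain.py --model_name_or_path patrickvonplaten/wav2vec2-base \
#       --dataset_name librispeech_asr --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained --do_train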
| 659 | 0 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
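# --- Usage sketch (illustrative) ---
# exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), 0.3)
# -> array([ 2.3, 0.6, -0.25939942, -0.29328877])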
| 717 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset( IterableDataset ):
def __init__( self ,tokenizer ,dataset ,n_tasks=None ,n_copies=1 ):
self.tokenizer = tokenizer
self.dataset = dataset
self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
self.n_copies = n_copies
def __iter__( self ):
prompts = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
outputs = self.tokenizer(prompts ,padding=True ,return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
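# each prompt is yielded n_copies times; combined with num_return_sequences at
# generation time this produces args.n_samples candidate completions per task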
class EndOfFunctionCriteria( StoppingCriteria ):
def __init__( self ,start_length ,eof_strings ,tokenizer ):
self.start_length = start_length
self.eof_strings = eof_strings
self.tokenizer = tokenizer
def __call__( self ,input_ids ,scores ,**kwargs ):
decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
done = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(done )
def remove_last_block(string ):
string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS ) , string )
# last string should be ""
return "".join(string_list[:-2] )
def complete_code(accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
gen_token_dict = defaultdict(list ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(dataloader ) ):
with torch.no_grad():
gen_kwargs["stopping_criteria"][0].start_length = batch['''ids'''].shape[-1]
generated_tokens = accelerator.unwrap_model(model ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=batch_size , **gen_kwargs )
# each task is generated batch_size times
generated_tasks = batch['''task_id'''].repeat(batch_size )
generated_tokens = accelerator.pad_across_processes(
generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
generated_tokens , generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
generated_tokens = generated_tokens.cpu().numpy()
generated_tasks = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(generated_tasks , generated_tokens ):
gen_token_dict[task].append(generated_tokens )
code_gens = [[] for _ in range(n_tasks )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
code_gens[task].append(remove_last_block(gen_code ) )
return code_gens
def main():
# Setup configuration
parser = HfArgumentParser(HumanEvalArguments )
args = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
os.environ["TOKENIZERS_PARALLELISM"] = '''false'''
if args.num_workers is None:
args.num_workers = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
accelerator = Accelerator()
set_seed(args.seed , device_specific=True )
# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
gen_kwargs = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
}
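# start_length=0 is a placeholder; complete_code() resets it to each batch's
# prompt length right before calling generate()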
# Load evaluation dataset and metric
human_eval = load_dataset('''openai_humaneval''' )
code_eval_metric = load_metric('''code_eval''' )
n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
n_copies = args.n_samples // args.batch_size
human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['''test'''] , n_copies=n_copies , n_tasks=n_tasks )
# do not confuse args.batch_size, which is actually the num_return_sequences
human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_A : List[str] = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
model , human_eval_loader = accelerator.prepare(model , human_eval_loader )
code_gens = complete_code(
accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
if accelerator.is_main_process:
references = []
for task in tqdm(range(n_tasks ) ):
test_func = human_eval['''test'''][task]['''test''']
entry_point = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
pass_at_k , _ = code_eval_metric.compute(
references=references , predictions=code_gens , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(pass_at_k , fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 332 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase__ = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments( BenchmarkArguments):
'''simple docstring'''
deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **kwargs):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
positive_arg = deprecated_arg[3:]
kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
logger.warning(
F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
F" {positive_arg}={kwargs[positive_arg]}")
_A : Optional[Any] = kwargs.pop("tpu_name" , self.tpu_name)
_A : List[str] = kwargs.pop("device_idx" , self.device_idx)
_A : int = kwargs.pop("eager_mode" , self.eager_mode)
_A : Any = kwargs.pop("use_xla" , self.use_xla)
super().__init__(**A_)
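# e.g. a legacy call with no_inference=True reaches the dataclass __init__ as
# inference=False after the renaming loop above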
tpu_name: str = field(
default=None , metadata={"help": "Name of TPU"} , )
device_idx: int = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
eager_mode: bool = field(default=False , metadata={"help": "Benchmark models in eager mode."})
use_xla: bool = field(
default=False , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
} , )
@cached_property
def _setup_tpu( self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["tf"])
tpu = None
if self.tpu:
try:
if self.tpu_name:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
else:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
tpu = None
return tpu
@cached_property
def _setup_strategy( self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["tf"])
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu)
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
strategy = tf.distribute.TPUStrategy(self._setup_tpu)
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU")
strategy = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}")
else:
tf.config.set_visible_devices([] , "GPU") # disable GPU
strategy = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}")
return strategy
@property
def is_tpu( self) -> bool:
requires_backends(self , ["tf"])
return self._setup_tpu is not None
@property
def _lowerCamelCase ( self) -> "tf.distribute.Strategy":
requires_backends(self , ["tf"])
return self._setup_strategy
@property
def gpu_list( self):
requires_backends(self , ["tf"])
return tf.config.list_physical_devices("GPU")
@property
def n_gpu( self) -> int:
requires_backends(self , ["tf"])
if self.cuda:
return len(self.gpu_list)
return 0
@property
def is_gpu( self) -> bool:
return self.n_gpu > 0
| 503 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos( position : tuple[int, int] , n : int ):
y , x = position
positions = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
permissible_positions = []
for position in positions:
y_test , x_test = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(position )
return permissible_positions
def is_complete( board : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board : list[list[int]] , pos : tuple[int, int] , curr : int ):
if is_complete(board ):
return True
for position in get_valid_pos(pos , len(board ) ):
y , x = position
if board[y][x] == 0:
board[y][x] = curr + 1
if open_knight_tour_helper(board , position , curr + 1 ):
return True
board[y][x] = 0 # backtrack: undo the move and try the next candidate
return False
def open_knight_tour( n : int ):
board = [[0 for i in range(n )] for j in range(n )]
for i in range(n ):
for j in range(n ):
board[i][j] = 1
if open_knight_tour_helper(board , (i, j) , 1 ):
return board
board[i][j] = 0
msg = F'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(msg )
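# open_knight_tour(5), for instance, returns a 5x5 board whose cells are
# numbered 1..25 in the order the knight visits them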
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
def __init__( self , model ):
'''simple docstring'''
super().__init__()
self.model = model
self.num_labels = 2
self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
def forward( self ):
'''simple docstring'''
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
longformer = LongformerModel.from_pretrained(longformer_model )
lightning_model = LightningModel(longformer )
ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 716 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 577 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = BarthezTokenizer
rust_tokenizer_class = BarthezTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp( self ):
super().setUp()
tokenizer = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
# second save exercises the non-legacy serialization path (flag reconstructed)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
self.tokenizer = tokenizer
def test_convert_token_and_id( self ):
token = '''<pad>'''
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab( self ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_UpperCamelCase ) , 101122 )
def test_vocab_size( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def test_prepare_batch( self ):
src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
# padding/truncation flags inferred from the asserted (2, 6) shapes below
batch = self.tokenizer(
src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='''pt''' )
self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
result = batch.input_ids.tolist()[0]
self.assertListEqual(expected_src_tokens , result )
def test_rust_and_python_full_tokenizers( self ):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = '''I was born in 92000, and this is falsé.'''
tokens = tokenizer.tokenize(sequence )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
@slow
def test_tokenizer_integration( self ):
# fmt: off
expected_encoding = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
sequences = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=sequences , )
| 32 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : Dict ={
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple =['PerceiverFeatureExtractor']
SCREAMING_SNAKE_CASE__ : int =['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[str] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 434 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor( ProcessorMixin ):
lowerCamelCase :List[str] = ["image_processor", "tokenizer"]
lowerCamelCase :Any = "ViltImageProcessor"
lowerCamelCase :Optional[int] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Tuple:
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , FutureWarning , )
feature_extractor = kwargs.pop('''feature_extractor''' )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(image_processor , tokenizer )
self.current_processor = self.image_processor
def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
encoding = self.tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
# add pixel_values + pixel_mask
encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
encoding.update(encoding_image_processor )
return encoding
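# typical usage (illustrative): processor(images=image, text="a photo of a cat",
# return_tensors="pt") returns a BatchEncoding holding input_ids/attention_mask
# plus pixel_values and pixel_mask from the image processor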
def batch_decode( self , *args , **kwargs ):
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
return self.tokenizer.decode(*args , **kwargs )
@property
def __lowercase ( self ) -> Dict:
lowerCamelCase : Tuple =self.tokenizer.model_input_names
lowerCamelCase : Optional[Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def feature_extractor_class( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
return self.image_processor_class
@property
def feature_extractor( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
return self.image_processor
| 262 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
snake_case_ = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys( s_dict ):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
keys = list(s_dict.keys() )
for key in keys:
pattern = R'''.*/layers_(\d+)'''
new_key = key
if re.match(pattern , key ):
new_key = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , new_key )
pattern = R'''(encoder|decoder)\/'''
if re.match(pattern , key ):
groups = re.match(pattern , new_key ).groups()
if groups[0] == "encoder":
new_key = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , new_key )
new_key = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , new_key )
elif groups[0] == "decoder":
new_key = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , new_key )
new_key = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , new_key )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
new_key = new_key.replace(old_key , temp_key )
print(F"{key} -> {new_key}" )
s_dict[new_key] = s_dict.pop(key )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase : Dict =s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase : Union[str, Any] =s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCamelCase : List[Any] =s_dict[key].shape[0]
lowerCamelCase : int =s_dict[key]
for idx in range(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Tuple =expert_weihts[idx]
print(F"{key} -> {key.replace('expert/' , 'nested fstring' )}" )
s_dict.pop(SCREAMING_SNAKE_CASE_ )
return s_dict
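# e.g. "encoder/layers_0/mlp/wi/kernel" becomes "encoder/block/0/layer/1/mlp/wi/kernel":
# layers_{x} is renumbered to block/{x}/layer and encoder MLPs shift to sub-layer 1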
snake_case_ = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config( gin_file , num_experts ):
# Convert a google style config to the hugging face format
import regex as re
with open(gin_file , '''r''' ) as f:
raw_gin = f.read()
regex_match = re.findall(R'''(.*) = ([0-9.]*)''' , raw_gin )
args = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '''.''' in value else int(value )
activation = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , raw_gin )[0]
args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
args['''num_experts'''] = num_experts
config = SwitchTransformersConfig(**args )
return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_name , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
# Initialise PyTorch model
print(F"Loading flax weights from : {flax_checkpoint_path}" )
flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
if gin_file is not None:
config = convert_gin_to_config(gin_file , num_experts )
else:
config = SwitchTransformersConfig.from_pretrained(config_name )
pt_model = SwitchTransformersForConditionalGeneration(config )
flax_params = flax_params['''target''']
flax_params = flatten_dict(flax_params , sep='''/''' )
flax_params = rename_keys(flax_params )
flax_params = unflatten_dict(flax_params , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(pt_model , flax_params )
print(F"Save PyTorch model to {pytorch_dump_path}" )
pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
snake_case_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 262 | 1 |
from typing import Any
import numpy as np
def is_hermitian( matrix : np.ndarray ) -> bool:
"""simple docstring"""
return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient( a : np.ndarray , v : np.ndarray ) -> Any:
"""simple docstring"""
v_star = v.conjugate().T
v_star_dot = v_star.dot(a )
assert isinstance(v_star_dot , np.ndarray )
return (v_star_dot.dot(v )) / (v_star.dot(v ))
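# For Hermitian a, the Rayleigh quotient (v* a v) / (v* v) is always real,
# which is exactly what tests() verifies below on a complex Hermitian and a
# real symmetric matrix.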
def tests( ) -> None:
"""simple docstring"""
a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
v = np.array([[1], [2], [3]] )
assert is_hermitian(a ), F'{a} is not hermitian.'
print(rayleigh_quotient(a , v ) )
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(a ), F'{a} is not hermitian.'
assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 105 |
'''simple docstring'''
def knapsack( weights : list , values : list , number_of_items : int , max_weight : int , index : int ) -> int:
"""simple docstring"""
if index == number_of_items:
return 0
ans1 = 0
ans2 = 0
ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
if weights[index] <= max_weight:
ans2 = values[index] + knapsack(
weights , values , number_of_items , max_weight - weights[index] , index + 1 )
return max(ans1 , ans2 )
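# Worked example (illustrative): knapsack([3, 2, 4], [4, 3, 5], 3, 5, 0) == 7,
# taking items 0 and 1 for total weight 3 + 2 = 5 and total value 4 + 3 = 7.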
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284 | 0 |
import math
def is_prime( number : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
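# Project Euler 58: the spiral ring with side j + 2 adds four corners; three of
# them are j*j + (j + 1), j*j + 2*(j + 1) and j*j + 3*(j + 1), while the fourth,
# (j + 2)**2, is a perfect square and never prime. A spiral of side length j
# holds 2*j - 1 numbers on its diagonals in total.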
def solution( ratio : float = 0.1 ) -> int:
'''simple docstring'''
# NOTE: the name `solution` follows the usual Project Euler layout; nothing in
# this snippet calls it, so only the body is load-bearing.
j = 3
primes = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def _download_and_prepare( self , dl_manager ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
scores = [
meteor_score.single_meteor_score(
word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
for ref, pred in zip(references , predictions )
]
else:
scores = [
meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
for ref, pred in zip(references , predictions )
]
return {"meteor": np.mean(scores )}
| 673 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowerCamelCase_ :
pass
| 17 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCAmelCase_ = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# attach deprecated aliases to the old module locations before deleting the refs
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 531 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase):
pipeline_class = StableUnCLIPPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
test_xformers_attention = False
def get_dummy_components( self ):
# NOTE: variable bindings below are reconstructed from the component dict at
# the end of this method; a few boolean flags are plausible values, not verbatim.
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
prior_tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
prior_text_encoder = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
prior = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
torch.manual_seed(0 )
prior_scheduler = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
image_noising_scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
text_encoder = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
unet = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=False , steps_offset=1 , )
torch.manual_seed(0 )
vae = AutoencoderKL()
components = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith('''mps''' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def test_attention_slicing_forward_pass( self ):
test_max_difference = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
def test_inference_batch_single_identical( self ):
test_max_difference = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase):
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_unclip( self ):
expected_image = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
pipe = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
output = pipe('''anime turtle''' , generator=generator , output_type='''np''' )
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image , expected_image )
def test_stable_unclip_pipeline_with_sequential_cpu_offloading( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_ = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 707 |
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class _UpperCAmelCase ( tf.keras.layers.Layer):
def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ):
super().__init__(**kwargs )
self.vocab_size = vocab_size
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [vocab_size]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs ) - 1
self.head_size = self.shortlist_size + self.n_clusters
self.keep_order = keep_order
self.out_layers = []
self.out_projs = []
def build( self , input_shape ):
if self.n_clusters > 0:
self.cluster_weight = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=True , name='''cluster_weight''' )
self.cluster_bias = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=True , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
weight = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=True , name=f"out_projs_._{i}" , )
self.out_projs.append(weight )
else:
self.out_projs.append(None )
weight = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=True , name=f"out_layers_._{i}_._weight" , )
bias = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=True , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = self.d_embed // (self.div_val**i)
proj = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=True , name=f"out_projs_._{i}" )
self.out_projs.append(proj )
weight = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=True , name=f"out_layers_._{i}_._weight" , )
bias = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=True , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
super().build(input_shape )
@staticmethod
def _logit( x , W , b , proj=None ):
y = x
if proj is not None:
y = tf.einsum('''ibd,ed->ibe''' , y , proj )
return tf.einsum('''ibd,nd->ibn''' , y , W ) + b
@staticmethod
def _gather_logprob( logprob , target ):
lp_size = shape_list(logprob )
r = tf.range(lp_size[0] , dtype=target.dtype )
idx = tf.stack([r, target] , 1 )
return tf.gather_nd(logprob , idx )
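# Adaptive softmax: tokens in the shortlist (ids below the first cutoff) are
# scored directly by the head softmax; rarer tokens combine the head's cluster
# log-probability with a smaller per-cluster tail softmax, cutting the cost of
# the output layer for large vocabularies.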
def call( self , hidden , target , return_mean=True , training=False ):
head_logprob = 0
if self.n_clusters == 0:
output = self._logit(hidden , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target , logits=output )
out = tf.nn.log_softmax(output , axis=-1 )
else:
hidden_sizes = shape_list(hidden )
out = []
loss = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
mask = (target >= l_idx) & (target < r_idx)
mask_idx = tf.where(mask )
cur_target = tf.boolean_mask(target , mask ) - l_idx
if self.div_val == 1:
cur_W = self.out_layers[0][0][l_idx:r_idx]
cur_b = self.out_layers[0][1][l_idx:r_idx]
else:
cur_W = self.out_layers[i][0]
cur_b = self.out_layers[i][1]
if i == 0:
cur_W = tf.concat([cur_W, self.cluster_weight] , 0 )
cur_b = tf.concat([cur_b, self.cluster_bias] , 0 )
head_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[0] )
head_logprob = tf.nn.log_softmax(head_logit )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
cur_head_logprob = tf.boolean_mask(head_logprob , mask )
cur_logprob = self._gather_logprob(cur_head_logprob , cur_target )
else:
tail_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[i] )
tail_logprob = tf.nn.log_softmax(tail_logit )
cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(logprob_i )
if target is not None:
cur_head_logprob = tf.boolean_mask(head_logprob , mask )
cur_tail_logprob = tf.boolean_mask(tail_logprob , mask )
cur_logprob = self._gather_logprob(cur_tail_logprob , cur_target )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(mask_idx , -cur_logprob , shape_list(loss ) )
out = tf.concat(out , axis=-1 )
if target is not None:
if return_mean:
loss = tf.reduce_mean(loss )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(loss )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(loss , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 485 | 0 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) ->bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix ) ->tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix ) ->Matrix | None:
if location := find_empty_location(grid ):
row , column = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(grid , row , column , digit ):
grid[row][column] = digit
if sudoku(grid ) is not None:
return grid
grid[row][column] = 0
return None
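# The walrus assignment doubles as the base case: when find_empty_location()
# returns None the grid is full and is returned as the solution; otherwise each
# digit is tried and undone (backtracking) until one leads to a completed grid.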
def print_solution(grid: Matrix ) ->None:
for row in grid:
for cell in row:
print(cell , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 118 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase :
@property
def dummy_input( self ):
'''simple docstring'''
return self.get_dummy_input()
@property
def output_shape( self ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def get_dummy_input( self ,include_temb=True ,include_res_hidden_states_tuple=False ,include_encoder_hidden_states=False ,include_skip_sample=False ,):
'''simple docstring'''
# NOTE: dict keys below follow the diffusers UNet block conventions
# ("temb", "res_hidden_states_tuple", "encoder_hidden_states", "skip_sample").
batch_size = 4
num_channels = 32
sizes = (32, 32)
generator = torch.manual_seed(0 )
device = torch.device(torch_device )
shape = (batch_size, num_channels) + sizes
hidden_states = randn_tensor(shape ,generator=generator ,device=device )
dummy_input = {'hidden_states': hidden_states}
if include_temb:
temb_channels = 128
dummy_input['temb'] = randn_tensor((batch_size, temb_channels) ,generator=generator ,device=device )
if include_res_hidden_states_tuple:
generator_1 = torch.manual_seed(1 )
dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape ,generator=generator_1 ,device=device ),)
if include_encoder_hidden_states:
dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
if include_skip_sample:
dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes) ,generator=generator ,device=device )
return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            # up blocks additionally need the channel count of the previous output
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 425 | 0 |
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
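# Added note: each try/except block above degrades gracefully. When an optional
# dependency is missing, the matching ``dummy_*_objects`` module is imported
# instead, exporting stand-ins that raise an informative error on first use.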
| 716 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Map the patch-embedding weights of stage ``idx`` to HF parameter names."""
    embed = []
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    """Map the attention-block weights of block ``cnt`` in stage ``idx`` to HF names."""
    attention_weights = []
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Map the classification token of stage ``idx`` to its HF name."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
return token
def final():
    """Map the final layernorm and classifier head weights to HF names."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original CvT weights into the HF CvT structure."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint (.pth) file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowercase = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
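# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24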
| 605 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """Round-trip a model through BetterTransformer and back, checking outputs match."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        """Saving a model still in BetterTransformer form must fail until it is reversed."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
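# Added note: ``save_pretrained`` refuses to serialize a converted model because
# the fused BetterTransformer layers do not map one-to-one onto the original
# checkpoint layout; ``reverse_bettertransformer`` restores the saveable form.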
| 210 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid, mapping any real number into (0, 1)."""
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    """Mean cross-entropy loss between predictions ``h`` and labels ``y``."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit weights by batch gradient descent on the cross-entropy loss."""
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
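# Added note: the `gradient` line above is the exact derivative of the
# cross-entropy cost for a sigmoid hypothesis. With h = sigmoid(x @ theta),
# dJ/dtheta = x.T @ (h - y) / m, so each step moves theta against the gradient,
# scaled by the learning rate alpha.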
| 210 | 1 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the coefficient term of Newton's formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
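# Added note: u = (value - x[0]) / (x[1] - x[0]) assumes the x samples are
# equally spaced; column i of the table then holds the i-th forward difference,
# and the final loop sums Newton's forward-difference series term by term.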
| 719 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if ``number`` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the reduced sum x + y + z as a (numerator, denominator) tuple."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """Sum the numerator and denominator of the total of all unique s(x, y, z)."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 314 | 0 |
from jiwer import compute_measures
import datasets
__a = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    """Word error rate, computed with ``jiwer.compute_measures``."""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
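# Usage sketch (illustrative; mirrors the docstring above):
#   wer = datasets.load_metric("wer")
#   score = wer.compute(predictions=["this is the prediction"],
#                       references=["this is the reference"])
#   # one substitution over four reference words -> 0.25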
| 97 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
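# Usage sketch (illustrative): after installing diffusers, the console entry
# point dispatches to this function, e.g.
#   diffusers-cli env    # prints environment/debug information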
| 139 | 0 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
    """Sanity checks covering the full truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
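# XNOR truth table for reference (added note): the output is 1 exactly when the
# two inputs agree.
#   A | B | A XNOR B
#   0 | 0 |    1
#   0 | 1 |    0
#   1 | 0 |    0
#   1 | 1 |    1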
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 270 |
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Miller-Rabin primality test, deterministic below the last bound."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.' )
    # array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    else:
        # probabilistic fallback above the last bound: test every listed base
        plist = primes
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
    """Exercise a composite and a prime on each side of every deterministic bound."""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
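# Added note: each bound is the smallest composite that defeats the prime bases
# taken so far (entries of 1 simply force one more base into plist), so testing
# n against plist is deterministic for every n below 3317044064679887385961981.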
if __name__ == "__main__":
    test_miller_rabin()
| 270 | 1 |
class Node:
    """A binary search tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    """Recurse through the tree, appending values to ``res`` in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Build a BST from ``arr`` and read it back with an in-order traversal."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
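# Added note: average cost is O(n log n), but the tree is never rebalanced, so
# an already-sorted input degenerates the BST into a linked list and sorting
# becomes O(n^2). Duplicate values are silently dropped by ``insert`` above.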
| 55 |
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple via the gcd."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(F'''{solution() = }''')
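# Worked example (added): lcm(4, 6) = (4 * 6) // gcd(4, 6) = 24 // 2 = 12;
# folding lcm over 1..20 gives solution() == 232792560.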
| 584 | 0 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """Count the triangle words in words.txt."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    triangle_words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(triangle_words)
if __name__ == "__main__":
print(solution())
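# Added example: "SKY" -> 19 + 11 + 25 = 55 = t(10), so "SKY" is a triangle word
# (ord("S") - 64 == 19 because "A" maps to 1).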
| 106 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Right- or left-pad a list of variable-length sequences to ``sequence_length``."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        trunc = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(trunc), :2] = trunc
            else:
                out_tensor[i, : len(trunc)] = trunc
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(trunc) :, :2] = trunc
            else:
                out_tensor[i, sequence_length - len(trunc) :] = trunc
    return out_tensor.tolist()
def is_punctuation(char):
    """Check whether ``char`` is a punctuation character."""
    cp = ord(char)
    # Treat the non-letter/number ASCII ranges as punctuation.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Pads token-classification features, including LUKE's entity tensors."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here because labels are not yet the same length.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 106 | 1 |
"""simple docstring"""
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if ``word`` was inserted as a whole word."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete ``word`` from the trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
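# Minimal usage sketch (added for illustration):
#   root = TrieNode()
#   root.insert_many(["to", "tea", "ten"])
#   root.find("tea")   # True
#   root.find("te")    # False: "te" is only a prefix, not an inserted word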
| 238 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs)

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for ``generate``."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
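# Usage sketch (illustrative; model ids taken from PRETRAINED_VOCAB_FILES_MAP above):
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tokenizer("Hello world", return_tensors="pt")
#   # the source text is encoded as [__en__] + tokens + [</s>]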
| 350 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase ( self) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def UpperCAmelCase ( self) -> int:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA")
    def test_save_load_float16( self):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1E-1)
    def test_attention_slicing_forward_pass( self):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def test_save_load_local( self):
"""simple docstring"""
self._test_save_load_local()
    def test_inference_batch_single_identical( self):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 582 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''CLIPImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor , tokenizer)
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
    def batch_decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs)
@property
    def model_input_names( self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
    def feature_extractor_class( self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
@property
    def feature_extractor( self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 582 | 1 |
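# Usage sketch for the processor above. The checkpoint and image URL are
# illustrative and require network access; they are not part of the original file.
import requests
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# With both text and images, the encoding carries input_ids, attention_mask
# and pixel_values, as implemented in __call__ above.
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))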
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( PipelineTesterMixin ,unittest.TestCase ):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        """num_images_per_prompt""",
        """latents""",
        """callback""",
        """callback_steps""",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        scheduler = DDIMScheduler()
        components = {'''unet''': unet, '''scheduler''': scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_dict_tuple_outputs_equivalent( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    def test_save_load_local( self ):
        super().test_save_load_local(expected_max_difference=3E-3 )

    def test_save_load_optional_components( self ):
        super().test_save_load_optional_components(expected_max_difference=3E-3 )

    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def test_inference_cifar10( self ):
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_ema_bedroom( self ):
        model_id = '''google/ddpm-ema-bedroom-256'''
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 432 |
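# Standalone sketch mirroring the slow CIFAR-10 test above (assumes `diffusers`
# is installed and the checkpoint can be downloaded; a GPU is optional).
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
pipe.set_progress_bar_config(disable=None)

generator = torch.manual_seed(0)
image = pipe(generator=generator, eta=0.0, output_type="numpy").images[0]
print(image.shape)  # (32, 32, 3)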
"""simple docstring"""
from statistics import mean, stdev
def normalization( data : list , ndigits : int = 3 ):
    """Rescale `data` linearly into the [0, 1] range (min-max normalization)."""
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data : list , ndigits : int = 3 ):
    """Rescale `data` to zero mean and unit sample standard deviation (z-score)."""
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / sigma , ndigits ) for x in data]
| 617 | 0 |
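# Worked example for the two rescaling helpers above.
data = [2.0, 4.0, 6.0, 8.0, 10.0]
print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
# mean = 6.0, sample stdev of data is about 3.162, so:
print(standardization(data))  # [-1.265, -0.632, 0.0, 0.632, 1.265]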
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 306 | 0 |
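# A stripped-down illustration of the lazy-import pattern used above: symbols
# listed in `_import_structure` are imported only on first attribute access.
# This is a simplified sketch, not the actual transformers._LazyModule.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        module = importlib.import_module(self._symbol_to_module[symbol])
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so later lookups skip __getattr__
        return value


# e.g. MiniLazyModule("m", {"json": ["loads"]}).loads('{"a": 1}') -> {'a': 1}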
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url ,stream=True ).raw ).convert("RGB" )
    return image
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key( dct ,old ,new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict ,config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias ,requires_grad=False ), v_bias) )
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config( model_name ):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" ,vocab_size=32001 ).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" ,vocab_size=32001 ).to_dict()
    else:
        raise ValueError("Model name not supported" )
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
    config = InstructBlipConfig(vision_config=vision_config ,text_config=text_config ,qformer_config=qformer_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name ,pytorch_dump_folder_path=None ,push_to_hub=False ):
UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained("bert-base-uncased" ,truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
UpperCAmelCase_: Union[str, Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" ,truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
UpperCAmelCase_: List[str] = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b" ,truncation_side="left" ,bos_token="</s>" ,unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
UpperCAmelCase_ , UpperCAmelCase_: Tuple = get_blipa_config(_a )
UpperCAmelCase_: List[Any] = InstructBlipForConditionalGeneration(_a ).eval()
UpperCAmelCase_: Optional[int] = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
UpperCAmelCase_: List[str] = "cuda:1" if torch.cuda.is_available() else "cpu"
UpperCAmelCase_: List[str] = "cuda:2" if torch.cuda.is_available() else "cpu"
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = load_model_and_preprocess(
name=_a ,model_type=_a ,is_eval=_a ,device=_a )
original_model.eval()
print("Done!" )
# update state dict keys
UpperCAmelCase_: Optional[int] = original_model.state_dict()
UpperCAmelCase_: Dict = create_rename_keys(_a )
for src, dest in rename_keys:
rename_key(_a ,_a ,_a )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_: Optional[int] = state_dict.pop(_a )
if key.startswith("Qformer.bert" ):
UpperCAmelCase_: List[str] = key.replace("Qformer.bert" ,"qformer" )
if "attention.self" in key:
UpperCAmelCase_: Optional[int] = key.replace("self" ,"attention" )
if "llm_proj" in key:
UpperCAmelCase_: List[Any] = key.replace("llm_proj" ,"language_projection" )
if "t5_proj" in key:
UpperCAmelCase_: int = key.replace("t5_proj" ,"language_projection" )
if key.startswith("llm_model" ):
UpperCAmelCase_: Optional[Any] = key.replace("llm_model" ,"language_model" )
if key.startswith("t5" ):
UpperCAmelCase_: List[Any] = key.replace("t5" ,"language" )
UpperCAmelCase_: int = val
# read in qv biases
read_in_q_v_bias(_a ,_a )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(_a ,strict=_a )
UpperCAmelCase_: Optional[int] = load_demo_image()
UpperCAmelCase_: int = "What is unusual about this image?"
# create processor
UpperCAmelCase_: Dict = BlipImageProcessor(
size={"height": image_size, "width": image_size} ,image_mean=_a ,image_std=_a )
UpperCAmelCase_: List[str] = InstructBlipProcessor(
image_processor=_a ,tokenizer=_a ,qformer_tokenizer=_a ,)
UpperCAmelCase_: Optional[int] = processor(images=_a ,text=_a ,return_tensors="pt" ).to(_a )
# make sure processor creates exact same pixel values
UpperCAmelCase_: int = vis_processors["eval"](_a ).unsqueeze(0 ).to(_a )
UpperCAmelCase_: Any = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) ,_a )
original_model.to(_a )
hf_model.to(_a )
with torch.no_grad():
if "vicuna" in model_name:
UpperCAmelCase_: Union[str, Any] = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
UpperCAmelCase_: List[Any] = hf_model(**_a ).logits
else:
UpperCAmelCase_: Union[str, Any] = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
UpperCAmelCase_: List[str] = tokenizer("\n" ,return_tensors="pt" ).input_ids.to(_a )
UpperCAmelCase_: Union[str, Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id ,-100 )
UpperCAmelCase_: Optional[Any] = hf_model(**_a ,labels=_a ).logits
print("First values of original logits:" ,original_logits[0, :3, :3] )
print("First values of HF logits:" ,logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
UpperCAmelCase_: Dict = 1e-4 if "vicuna" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) ,_a ,atol=_a )
print("Looks ok!" )
print("Generating with original model..." )
UpperCAmelCase_: str = original_model.generate({"image": original_pixel_values, "prompt": prompt} ,num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
UpperCAmelCase_: List[Any] = hf_model.generate(
**_a ,do_sample=_a ,num_beams=5 ,max_length=256 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.5 ,length_penalty=1.0 ,temperature=1 ,)
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
UpperCAmelCase_: int = 2
print("Original generation:" ,_a )
UpperCAmelCase_: List[str] = processor.batch_decode(_a ,skip_special_tokens=_a )
UpperCAmelCase_: List[Any] = [text.strip() for text in output_text]
print("HF generation:" ,_a )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_a )
hf_model.save_pretrained(_a )
if push_to_hub:
processor.push_to_hub(f"Salesforce/{model_name}" )
hf_model.push_to_hub(f"Salesforce/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 306 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute(self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ):
        """simple docstring"""
        if rouge_types is None:
            rouge_types = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 15 |
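# Sketch of the two return modes of `_compute` above, calling rouge_score
# directly (requires `pip install rouge-score`).
from rouge_score import rouge_scorer, scoring

scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1"], use_stemmer=True)
pairs = [("hello there", "hello there"), ("general kenobi", "general kenobi")]

# use_aggregator=True: bootstrap aggregate with low/mid/high per rouge type.
aggregator = scoring.BootstrapAggregator()
for ref, pred in pairs:
    aggregator.add_scores(scorer.score(ref, pred))
print(aggregator.aggregate()["rouge1"].mid.fmeasure)  # 1.0

# use_aggregator=False: one score dict per (reference, prediction) pair.
scores = [scorer.score(ref, pred) for ref, pred in pairs]
print([s["rouge1"].fmeasure for s in scores])  # [1.0, 1.0]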
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
args = parser.parse_args()

device = """cpu"""
prompt = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
model_id = """path-to-your-trained-model"""
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"""generator""": generator}
if args.steps is not None:
    generate_kwargs["""num_inference_steps"""] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("""generated.png""")
| 112 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 544 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _lowerCamelCase ( PretrainedConfig ):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 544 | 1 |
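# Instantiation sketch for the config above, using the upstream transformers
# class of the same shape (nothing is downloaded; weights are random).
from transformers import Swinv2Config, Swinv2Model

config = Swinv2Config(image_size=256, window_size=8)
# hidden_size is derived in __init__: embed_dim * 2 ** (len(depths) - 1) = 96 * 8
print(config.hidden_size)  # 768

model = Swinv2Model(config)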
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowerCAmelCase : str = logging.get_logger(__name__)
class __magic_name__ ( BeitImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 242 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ):
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"=> File names {file_names}" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 679 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple , flax_tensor ):
    """Rename flax weight keys and adjust tensors to the PyTorch layout."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block , save_path ):
    """simple docstring"""
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/" , "." )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name: str = WEIGHTS_NAME ):
    """simple docstring"""
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info , sep="/" )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        flax_key , raw_weights = rename_base_flax_keys(tuple(key.split("/" ) ) , raw_weights )
        key = "/".join(flax_key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            ".bin" , F"-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin" )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check( ):
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    tokenizer = T5Tokenizer.from_pretrained("t5-small" )
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text , return_tensors="pt" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 719 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class a ( PretrainedConfig ):
    model_type = 'mctct'
    def __init__( self , vocab_size=8_065 , hidden_size=1_536 , num_hidden_layers=36 , intermediate_size=6_144 , num_attention_heads=4 , attention_head_dim=384 , max_position_embeddings=920 , layer_norm_eps=1E-5 , layerdrop=0.3 , hidden_act="relu" , initializer_range=0.02 , hidden_dropout_prob=0.3 , attention_probs_dropout_prob=0.3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , conv_glu_dim=1 , conv_dropout=0.3 , num_conv_layers=1 , conv_kernel=(7,) , conv_stride=(3,) , input_feat_per_channel=80 , input_channels=1 , conv_channels=None , ctc_loss_reduction="sum" , ctc_zero_infinity=False , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel )
        self.conv_stride = list(conv_stride )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
| 376 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig ):
    model_type = '''vision-encoder-decoder'''
    is_composition = True

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuraton of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop("encoder" )
        encoder_model_type = encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("decoder" )
        decoder_model_type = decoder_config.pop("model_type" )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-4

    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch , encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids" )
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask" )
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> None:
        pass

    def get_encoder_config( self , encoder_config: PretrainedConfig ) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )

    def get_decoder_config( self , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , feature: str = "default" ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
| 95 |
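# Composition sketch for VisionEncoderDecoderConfig above; the encoder and
# decoder config classes are chosen here only for illustration.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
print(config.model_type)          # vision-encoder-decoder
print(config.decoder.is_decoder)  # True, set by from_encoder_decoder_configs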
import numpy as np
SQUARE = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class lowercase__ :
def __init__( self )-> None:
'''simple docstring'''
lowerCAmelCase__ = np.array(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = np.where(letter == self.SQUARE )
lowerCAmelCase__ = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = self.SQUARE[indexa - 1, indexa - 1]
return letter
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = message.lower()
lowerCAmelCase__ = message.replace(" " , "" )
lowerCAmelCase__ = message.replace("j" , "i" )
lowerCAmelCase__ = np.empty((2, len(__UpperCAmelCase )) )
for letter_index in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ = self.letter_to_numbers(message[letter_index] )
lowerCAmelCase__ = numbers[0]
lowerCAmelCase__ = numbers[1]
lowerCAmelCase__ = first_step.reshape(2 * len(__UpperCAmelCase ) )
lowerCAmelCase__ = ""
for numbers_index in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ = int(second_step[numbers_index * 2] )
lowerCAmelCase__ = int(second_step[(numbers_index * 2) + 1] )
lowerCAmelCase__ = self.numbers_to_letter(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = encoded_message + letter
return encoded_message
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = message.lower()
message.replace(" " , "" )
lowerCAmelCase__ = np.empty(2 * len(__UpperCAmelCase ) )
for letter_index in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ = self.letter_to_numbers(message[letter_index] )
lowerCAmelCase__ = numbers[0]
lowerCAmelCase__ = numbers[1]
lowerCAmelCase__ = first_step.reshape((2, len(__UpperCAmelCase )) )
lowerCAmelCase__ = ""
for numbers_index in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ = int(second_step[0, numbers_index] )
lowerCAmelCase__ = int(second_step[1, numbers_index] )
lowerCAmelCase__ = self.numbers_to_letter(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = decoded_message + letter
return decoded_message
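# Quick usage sketch for the cipher above ("j" is folded into "i" on encode,
# so the round-trip holds for j-free lowercase text):
#
# cipher = BifidCipher()
# secret = cipher.encode("testmessage")
# assert cipher.decode(secret) == "testmessage"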
| 339 | 0 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: x0 <= x1 and y0 <= y1 for every box
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
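# Sketch of how the tester above is driven (`self` would be the TestCase
# instance that owns the assertions, as in the test class below):
#
# tester = LiltModelTester(self)
# config, inputs_dict = tester.prepare_config_and_inputs_for_common()
# tester.create_and_check_model(*tester.prepare_config_and_inputs())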
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 714 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
__lowerCamelCase : Optional[Any] = WavaVecaPhonemeCTCTokenizer
__lowerCamelCase : int = False
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
        vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
def UpperCAmelCase ( self : Optional[int] , a_ : Union[str, Any] , a_ : Tuple=False , a_ : Optional[Any]=20 , a_ : List[Any]=5 ) -> Tuple[str, list]:
'''simple docstring'''
a__ : Dict = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=a_ )) for i in range(len(a_ ) )]
a__ : List[str] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=a_ ) , a_ ) )
if max_length is not None and len(a_ ) > max_length:
a__ : int = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
a__ : str = toks + toks
# toks_str = [t[1] for t in toks]
a__ : int = [t[0] for t in toks]
# Ensure consistency
a__ : List[Any] = tokenizer.decode(a_ , clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
a__ : Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a_ )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
a__ : Union[str, Any] = " " + output_txt
a__ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
return output_txt, output_ids
def UpperCAmelCase ( self : Optional[int] , **a_ : Tuple ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
a__ : str = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
a__ : int = tokenizer("m xxx ɪ" , do_phonemize=a_ ).input_ids
self.assertEqual(a_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
a__ : Union[str, Any] = tokenizer("m aaa ɪ ccc" , do_phonemize=a_ ).input_ids
self.assertEqual(a_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
a__ : Tuple = tokenizer("maɪ c" , do_phonemize=a_ ).input_ids
self.assertEqual(a_ , [3, 2_00] ) # mai should be <unk> (=3)
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
a__ : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
a__ : Union[str, Any] = "Hello how are you"
a__ : List[Any] = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
self.assertEqual(a_ , "h ə l oʊ h aʊ ɑːɹ j uː" )
def UpperCAmelCase ( self : str ) -> Any:
'''simple docstring'''
a__ : Dict = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
a__ : Optional[int] = "Hello how are you"
a__ : Optional[int] = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(a_ ).input_ids , tokenizer(a_ , do_phonemize=a_ ).input_ids )
def UpperCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
a__ : List[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
a__ : Any = "Hello how are you"
a__ : Optional[int] = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
a__ : Optional[int] = tokenizer.decode(tokenizer(a_ ).input_ids )
self.assertEqual(a_ , a_ )
def UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
a__ : List[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
a__ : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
a__ : Union[str, Any] = tokenizer.decode(sample_ids[0] )
a__ : Tuple = tokenizer.batch_decode(a_ )
self.assertEqual(a_ , batch_tokens[0] )
self.assertEqual(a_ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def UpperCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
a__ : str = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
a__ : List[Any] = "Hello how are you"
a__ : Union[str, Any] = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
self.assertEqual(a_ , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
a__ : Any = "Hello how are you"
a__ : Optional[int] = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(a_ ).input_ids , tokenizer(a_ , do_phonemize=a_ ).input_ids )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
a__ : List[str] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
a__ : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
a__ : List[Any] = tokenizer.decode(sample_ids[0] )
a__ : Tuple = tokenizer.batch_decode(a_ )
self.assertEqual(a_ , batch_tokens[0] )
self.assertEqual(a_ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
a__ : List[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=a_ )
a__ : Dict = tokenizer.batch_decode(a_ , filter_word_delimiter_token=a_ )
self.assertEqual(a_ , batch_tokens[0] )
self.assertEqual(a_ , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
a__ : Optional[Any] = "Hello how are you"
a__ : Union[str, Any] = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
a__ : str = tokenizer.decode(tokenizer(a_ ).input_ids , filter_word_delimiter_token=a_ )
self.assertEqual(a_ , a_ )
def UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
a__ : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
a__ : Optional[int] = "Hello how are you"
a__ : str = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
a__ : Dict = tokenizer.decode(tokenizer(a_ ).input_ids , filter_word_delimiter_token=a_ )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , a_ )
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
a__ : str = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=a_ )
a__ : List[str] = "Hello how are you"
a__ : Tuple = tokenizer(a_ , phonemizer_lang="en-us" ).input_ids
a__ : Tuple = tokenizer(a_ , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(a_ , a_ )
a__ : int = tokenizer.decode(a_ )
a__ : Tuple = tokenizer.decode(a_ )
self.assertEqual(a_ , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(a_ , "ɛ l o h aʊ a ʁ j u" )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
a__ : Tuple = "Hello how Are you"
a__ : List[Any] = "hello how are you"
a__ : Optional[Any] = tokenizer(a_ ).input_ids
a__ : int = tokenizer(a_ ).input_ids
self.assertEqual(a_ , a_ )
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : str = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
a__ : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
a__ : Any = tokenizer.batch_decode(a_ )
self.assertEqual(a_ , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def UpperCAmelCase ( a_ : Optional[Any] , a_ : List[str] ) -> Any:
'''simple docstring'''
a__ : Optional[Any] = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
a__ : List[str] = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
a__ : Tuple = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
a__ : str = tokenizer.decode(a_ , output_char_offsets=a_ , filter_word_delimiter_token=a_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(a_ , a_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
a__ : List[str] = self.get_tokenizer(word_delimiter_token="|" )
def check_list_tuples_equal(a_ : Optional[Any] , a_ : Union[str, Any] ):
self.assertTrue(isinstance(a_ , a_ ) )
self.assertTrue(isinstance(outputs_list[0] , a_ ) )
# transform list to ModelOutput
a__ : List[str] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
def recursive_check(a_ : int , a_ : str ):
if isinstance(a_ , a_ ):
[recursive_check(a_ , a_ ) for la, la in zip(a_ , a_ )]
self.assertEqual(a_ , a_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
# fmt: off
a__ : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
a__ : Union[str, Any] = tokenizer.batch_decode(a_ , output_char_offsets=a_ )
a__ : List[Any] = [tokenizer.decode(a_ , output_char_offsets=a_ ) for ids in sample_ids]
check_list_tuples_equal(a_ , a_ )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def UpperCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
a__ : Optional[int] = tokenizer.vocab_size
a__ : List[Any] = len(a_ )
self.assertNotEqual(a_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a__ : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
a__ : List[str] = tokenizer.add_tokens(a_ )
a__ : Tuple = tokenizer.vocab_size
a__ : Dict = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size + len(a_ ) )
a__ : List[str] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
a__ : Tuple = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
a__ : List[Any] = tokenizer.add_special_tokens(a_ )
a__ : List[str] = tokenizer.vocab_size
a__ : List[Any] = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size_a + len(a_ ) )
a__ : List[str] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def UpperCAmelCase ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] = self.get_tokenizers(fast=a_ , do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
a__ : Union[str, Any] = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
a__ : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(output["text"] , a_ ) | 251 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    """Configuration class for DistilBERT models."""

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
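# Minimal, commented-out usage sketch for the config above (the override
# values are illustrative):
#
# config = DistilBertConfig(n_layers=4, n_heads=8, dim=512, hidden_dim=2048)
# print(config.num_hidden_layers)  # 4, resolved through `attribute_map` to `n_layers`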
| 173 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    """Configuration class for RoBERTa models."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
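# Commented-out sketch of the task-dependent dynamic axes (the `task` keyword
# follows the transformers.onnx feature names):
#
# onnx_config = RobertaOnnxConfig(RobertaConfig(), task="multiple-choice")
# print(onnx_config.inputs["input_ids"])  # {0: 'batch', 1: 'choice', 2: 'sequence'}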
| 173 | 1 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the standard luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel: a pixel turns on if any on-pixel
    # of the input falls under the structuring element
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 317 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def UpperCamelCase_ ( lowerCAmelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = SwinConfig()
lowerCAmelCase_ : List[str] = swin_name.split('_' )
lowerCAmelCase_ : Optional[int] = name_split[1]
lowerCAmelCase_ : List[str] = int(name_split[4] )
lowerCAmelCase_ : Optional[Any] = int(name_split[3][-1] )
if model_size == "tiny":
lowerCAmelCase_ : int = 96
lowerCAmelCase_ : Any = (2, 2, 6, 2)
lowerCAmelCase_ : List[Any] = (3, 6, 12, 24)
elif model_size == "small":
lowerCAmelCase_ : str = 96
lowerCAmelCase_ : Optional[int] = (2, 2, 18, 2)
lowerCAmelCase_ : Union[str, Any] = (3, 6, 12, 24)
elif model_size == "base":
lowerCAmelCase_ : Dict = 128
lowerCAmelCase_ : Optional[int] = (2, 2, 18, 2)
lowerCAmelCase_ : str = (4, 8, 16, 32)
else:
lowerCAmelCase_ : Union[str, Any] = 192
lowerCAmelCase_ : Optional[int] = (2, 2, 18, 2)
lowerCAmelCase_ : Optional[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
lowerCAmelCase_ : Tuple = 2_1841
else:
lowerCAmelCase_ : Optional[int] = 1000
lowerCAmelCase_ : List[str] = 'huggingface/label-files'
lowerCAmelCase_ : Union[str, Any] = 'imagenet-1k-id2label.json'
lowerCAmelCase_ : str = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase_ : Any = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Dict = idalabel
lowerCAmelCase_ : Dict = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[Any] = img_size
lowerCAmelCase_ : str = num_classes
lowerCAmelCase_ : str = embed_dim
lowerCAmelCase_ : Dict = depths
lowerCAmelCase_ : Tuple = num_heads
lowerCAmelCase_ : int = window_size
return config
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> List[Any]:
"""simple docstring"""
if "patch_embed.proj" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCAmelCase_ : int = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
lowerCAmelCase_ : Any = 'encoder.' + name
if "attn.proj" in name:
lowerCAmelCase_ : Any = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCAmelCase_ : Optional[int] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCAmelCase_ : int = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase_ : List[str] = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
lowerCAmelCase_ : Dict = 'layernorm.weight'
if name == "norm.bias":
lowerCAmelCase_ : List[str] = 'layernorm.bias'
if "head" in name:
lowerCAmelCase_ : List[str] = name.replace('head' , 'classifier' )
else:
lowerCAmelCase_ : Optional[Any] = 'swin.' + name
return name
def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ : Dict = orig_state_dict.pop(lowerCAmelCase__ )
if "mask" in key:
continue
elif "qkv" in key:
lowerCAmelCase_ : Optional[int] = key.split('.' )
lowerCAmelCase_ : Optional[int] = int(key_split[1] )
lowerCAmelCase_ : Optional[int] = int(key_split[3] )
lowerCAmelCase_ : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase_ : str = val[:dim, :]
lowerCAmelCase_ : Union[str, Any] = val[
dim : dim * 2, :
]
lowerCAmelCase_ : int = val[-dim:, :]
else:
lowerCAmelCase_ : Any = val[
:dim
]
lowerCAmelCase_ : int = val[
dim : dim * 2
]
lowerCAmelCase_ : int = val[
-dim:
]
else:
lowerCAmelCase_ : str = val
return orig_state_dict
def UpperCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : str ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : List[str] = timm.create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
lowerCAmelCase_ : Optional[int] = get_swin_config(lowerCAmelCase__ )
lowerCAmelCase_ : int = SwinForImageClassification(lowerCAmelCase__ )
model.eval()
lowerCAmelCase_ : Any = convert_state_dict(timm_model.state_dict() , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
lowerCAmelCase_ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase_ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
lowerCAmelCase_ : Union[str, Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
lowerCAmelCase_ : Optional[int] = image_processor(images=lowerCAmelCase__ , return_tensors='pt' )
lowerCAmelCase_ : int = timm_model(inputs['pixel_values'] )
lowerCAmelCase_ : Tuple = model(**lowerCAmelCase__ ).logits
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
print(f"Saving model {swin_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase__ : List[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
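# Example invocation (the script filename is illustrative; both flags are the
# ones defined by the argparser above):
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224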
| 317 | 1 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
_SCREAMING_SNAKE_CASE = frozenset(["prompt", "negative_prompt"])
_SCREAMING_SNAKE_CASE = frozenset([])
_SCREAMING_SNAKE_CASE = frozenset(["image"])
_SCREAMING_SNAKE_CASE = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
_SCREAMING_SNAKE_CASE = frozenset(["image"])
_SCREAMING_SNAKE_CASE = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
_SCREAMING_SNAKE_CASE = frozenset(["prompt", "image", "negative_prompt"])
_SCREAMING_SNAKE_CASE = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
_SCREAMING_SNAKE_CASE = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
_SCREAMING_SNAKE_CASE = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
_SCREAMING_SNAKE_CASE = frozenset(["image", "mask_image"])
_SCREAMING_SNAKE_CASE = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
_SCREAMING_SNAKE_CASE = frozenset(["example_image", "image", "mask_image"])
_SCREAMING_SNAKE_CASE = frozenset(["class_labels"])
_SCREAMING_SNAKE_CASE = frozenset(["class_labels"])
_SCREAMING_SNAKE_CASE = frozenset(["batch_size"])
_SCREAMING_SNAKE_CASE = frozenset([])
_SCREAMING_SNAKE_CASE = frozenset(["batch_size"])
_SCREAMING_SNAKE_CASE = frozenset([])
_SCREAMING_SNAKE_CASE = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
_SCREAMING_SNAKE_CASE = frozenset(["prompt", "negative_prompt"])
_SCREAMING_SNAKE_CASE = frozenset(["input_tokens"])
_SCREAMING_SNAKE_CASE = frozenset(["input_tokens"])
| 366 |
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Find the longest common substring of two strings via dynamic programming.

    >>> longest_common_substring("abcdxyz", "xyzabcd")
    'abcd'
    >>> longest_common_substring("", "abc")
    ''
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
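# The DP above uses O(len(text1) * len(text2)) time and space. A common space
# optimization keeps only the previous DP row; the helper below is a sketch
# added for illustration (it returns only the length, not the substring):
def longest_common_substring_len(text1: str, text2: str) -> int:
    prev = [0] * (len(text2) + 1)
    best = 0
    for i in range(1, len(text1) + 1):
        curr = [0] * (len(text2) + 1)
        for j in range(1, len(text2) + 1):
            if text1[i - 1] == text2[j - 1]:
                curr[j] = 1 + prev[j - 1]
                best = max(best, curr[j])
        prev = curr
    return best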
| 427 | 0 |
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` rounds of random bases."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 as d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
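# With random bases the test above is probabilistic: a composite survives one
# round with probability at most 1/4, so `prec` rounds leave an error bound of
# 4 ** -prec. A known result makes the test deterministic for all n < 2**64 by
# replacing the random bases with this fixed witness set:
MILLER_RABIN_WITNESSES_64BIT = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37)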
| 705 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def A_ ( a , a ):
"""simple docstring"""
if os.path.exists(a ):
if os.path.exists(os.path.join(a , 'config.json' ) ) and os.path.isfile(
os.path.join(a , 'config.json' ) ):
os.remove(os.path.join(a , 'config.json' ) )
if os.path.exists(os.path.join(a , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(a , 'pytorch_model.bin' ) ):
os.remove(os.path.join(a , 'pytorch_model.bin' ) )
else:
os.makedirs(a )
model.save_pretrained(a )
def A_ ( a , a=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 2
if unlogit:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.pow(a , a )
SCREAMING_SNAKE_CASE_ : Optional[int] = p * torch.log(a )
SCREAMING_SNAKE_CASE_ : List[str] = 0
return -plogp.sum(dim=-1 )
def A_ ( a ):
"""simple docstring"""
logger.info('lv, h >\t' + '\t'.join(f"{x + 1}" for x in range(len(a ) ) ) )
for row in range(len(a ) ):
if tensor.dtype != torch.long:
logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:.5f}" for x in tensor[row].cpu().data ) )
else:
logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:d}" for x in tensor[row].cpu().data ) )
def A_ ( a , a , a , a=True , a=True , a=None , a=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = model.config.num_hidden_layers, model.config.num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.zeros(a , a ).to(args.device )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.zeros(a , a ).to(args.device )
if head_mask is None:
SCREAMING_SNAKE_CASE_ : str = torch.ones(a , a ).to(args.device )
head_mask.requires_grad_(requires_grad=a )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : int = 0.0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.0
for step, inputs in enumerate(tqdm(a , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
SCREAMING_SNAKE_CASE_ : Any = tuple(t.to(args.device ) for t in inputs )
((SCREAMING_SNAKE_CASE_) , ) : str = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
SCREAMING_SNAKE_CASE_ : List[Any] = model(a , labels=a , head_mask=a )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(a ):
SCREAMING_SNAKE_CASE_ : Tuple = entropy(attn.detach() , a )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(a ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = torch.pow(torch.pow(a , a ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
SCREAMING_SNAKE_CASE_ : List[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(a )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(a )
logger.info('Head ranked by importance scores' )
SCREAMING_SNAKE_CASE_ : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
SCREAMING_SNAKE_CASE_ : Tuple = torch.arange(
head_importance.numel() , device=args.device )
SCREAMING_SNAKE_CASE_ : List[str] = head_ranks.view_as(a )
print_ad_tensor(a )
return attn_entropy, head_importance, total_loss
def A_ ( a , a , a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = compute_heads_importance(a , a , a , compute_entropy=a )
SCREAMING_SNAKE_CASE_ : int = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , a , original_score * args.masking_threshold )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.ones_like(a )
SCREAMING_SNAKE_CASE_ : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = original_score
while current_score >= original_score * args.masking_threshold:
SCREAMING_SNAKE_CASE_ : int = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
SCREAMING_SNAKE_CASE_ : List[Any] = float('Inf' )
SCREAMING_SNAKE_CASE_ : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(a ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
SCREAMING_SNAKE_CASE_ : Optional[int] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
SCREAMING_SNAKE_CASE_ : int = new_head_mask.view(-1 )
SCREAMING_SNAKE_CASE_ : int = 0.0
SCREAMING_SNAKE_CASE_ : Optional[int] = new_head_mask.view_as(a )
SCREAMING_SNAKE_CASE_ : int = new_head_mask.clone().detach()
print_ad_tensor(a )
# Compute metric and head importance again
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = compute_heads_importance(
a , a , a , compute_entropy=a , head_mask=a )
SCREAMING_SNAKE_CASE_ : str = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , a , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('Final head mask' )
print_ad_tensor(a )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def A_ ( a , a , a , a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = datetime.now()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = compute_heads_importance(
a , a , a , compute_entropy=a , compute_importance=a , head_mask=a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1 / loss
SCREAMING_SNAKE_CASE_ : int = datetime.now() - before_time
SCREAMING_SNAKE_CASE_ : str = sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE_ : Dict = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(a ) )
}
for k, v in heads_to_prune.items():
if isinstance(a , a ):
SCREAMING_SNAKE_CASE_ : Optional[int] = [
v,
]
assert sum(len(a ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(a )
SCREAMING_SNAKE_CASE_ : List[str] = sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE_ : Dict = datetime.now()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = compute_heads_importance(
a , a , a , compute_entropy=a , compute_importance=a , head_mask=a , actually_pruned=a , )
SCREAMING_SNAKE_CASE_ : Optional[int] = 1 / loss
SCREAMING_SNAKE_CASE_ : Any = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , a , a , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , a , a )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
save_model(a , args.output_dir )
def A_ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=a , type=a , required=a , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=a , type=a , required=a , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=a , type=a , required=a , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=a , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=a , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=a , type=a , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=a , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=a , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=a , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=a , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=a , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=a , help='Batch size.' )
parser.add_argument('--seed' , type=a , default=4_2 )
parser.add_argument('--local_rank' , type=a , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=a , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=a , default='' , help='Can be used for distant debugging.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
SCREAMING_SNAKE_CASE_ : Any = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
SCREAMING_SNAKE_CASE_ : Tuple = torch.device('cuda' , args.local_rank )
SCREAMING_SNAKE_CASE_ : str = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
SCREAMING_SNAKE_CASE_ : Dict = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
SCREAMING_SNAKE_CASE_ : Dict = nn.parallel.DistributedDataParallel(
a , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=a )
elif args.n_gpu > 1:
SCREAMING_SNAKE_CASE_ : int = nn.DataParallel(a )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=a )
torch.save(a , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , a )
# Prepare dataset
SCREAMING_SNAKE_CASE_ : int = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
SCREAMING_SNAKE_CASE_ : Tuple = (torch.from_numpy(a ),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorDataset(*a )
SCREAMING_SNAKE_CASE_ : Dict = RandomSampler(a )
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(a , sampler=a , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(a , a , a )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mask_heads(a , a , a )
prune_heads(a , a , a , a )
if __name__ == "__main__":
main()
| 353 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_m2m_100'] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_lowerCamelCase : Dict = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_lowerCamelCase : str = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_lowerCamelCase : Union[str, Any] = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
def __UpperCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
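        # Version-gated NLTK resources: `punkt` backs the `word_tokenize` call used by
        # newer releases, and `omw-1.4` backs the WordNet-based synonym matching.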
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__=0.9 , lowercase__=3 , lowercase__=0.5 ):
'''simple docstring'''
if NLTK_VERSION >= version.Version('''3.6.5''' ):
__A =[
meteor_score.single_meteor_score(
word_tokenize(lowercase__ ) , word_tokenize(lowercase__ ) , alpha=lowercase__ , beta=lowercase__ , gamma=lowercase__ )
for ref, pred in zip(lowercase__ , lowercase__ )
]
else:
__A =[
meteor_score.single_meteor_score(lowercase__ , lowercase__ , alpha=lowercase__ , beta=lowercase__ , gamma=lowercase__ )
for ref, pred in zip(lowercase__ , lowercase__ )
]
return {"meteor": np.mean(lowercase__ )}
| 184 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_snake_case = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_snake_case = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_snake_case = field(
default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_snake_case = field(
default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_snake_case = field(
default=A__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_snake_case = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_snake_case = field(
default=A__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_snake_case = field(default=A__ , metadata={'''help''': '''The input training data file (a text file).'''} )
_snake_case = field(
default=A__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_snake_case = field(
default=A__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_snake_case = field(
default=A__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_snake_case = field(
default=A__ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_snake_case = field(
default=A__ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
_snake_case = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_snake_case = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def A__ ( self ) -> Optional[int]:
if self.train_file is not None:
__lowerCAmelCase = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__lowerCAmelCase = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_snake_case = 42
_snake_case = True
_snake_case = None
_snake_case = None
def __call__( self , snake_case_ ) -> Dict:
__lowerCAmelCase = """label""" if """label""" in features[0].keys() else """labels"""
__lowerCAmelCase = [feature.pop(snake_case_ ) for feature in features]
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = len(features[0]["""input_ids"""] )
__lowerCAmelCase = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
__lowerCAmelCase = list(chain(*snake_case_ ) )
__lowerCAmelCase = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
__lowerCAmelCase = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
__lowerCAmelCase = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
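# Shape sketch for the collator above (illustrative, assuming batch_size=2 and
# num_choices=4): the 2x4 nested features are flattened into 8 sequences so the
# tokenizer can pad them together, then viewed back as (2, 4, max_length).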
def lowercase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , _lowerCAmelCase , _lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(_lowerCAmelCase )
datasets.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__lowerCAmelCase = {}
if data_args.train_file is not None:
__lowerCAmelCase = data_args.train_file
if data_args.validation_file is not None:
__lowerCAmelCase = data_args.validation_file
__lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
__lowerCAmelCase = load_dataset(
_lowerCAmelCase , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__lowerCAmelCase = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__lowerCAmelCase = [f"""ending{i}""" for i in range(4 )]
__lowerCAmelCase = """sent1"""
__lowerCAmelCase = """sent2"""
if data_args.max_seq_length is None:
__lowerCAmelCase = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
__lowerCAmelCase = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowerCAmelCase ):
__lowerCAmelCase = [[context] * 4 for context in examples[context_name]]
__lowerCAmelCase = examples[question_header_name]
__lowerCAmelCase = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowerCAmelCase )
]
# Flatten out
__lowerCAmelCase = list(chain(*_lowerCAmelCase ) )
__lowerCAmelCase = list(chain(*_lowerCAmelCase ) )
# Tokenize
__lowerCAmelCase = tokenizer(
_lowerCAmelCase , _lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowerCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
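    # Un-flattening sketch: the tokenizer saw 4 * num_examples flat sequences, so
    # v[0:4] regroups the endings of the first example, v[4:8] the second, and so on.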
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__lowerCAmelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__lowerCAmelCase = min(len(_lowerCAmelCase ) , data_args.max_train_samples )
__lowerCAmelCase = train_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__lowerCAmelCase = train_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__lowerCAmelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__lowerCAmelCase = min(len(_lowerCAmelCase ) , data_args.max_eval_samples )
__lowerCAmelCase = eval_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__lowerCAmelCase = eval_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__lowerCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowerCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowerCAmelCase ):
__lowerCAmelCase , __lowerCAmelCase = eval_predictions
__lowerCAmelCase = np.argmax(_lowerCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__lowerCAmelCase = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowerCAmelCase , data_collator=_lowerCAmelCase , compute_metrics=_lowerCAmelCase , )
# Training
if training_args.do_train:
__lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase = last_checkpoint
__lowerCAmelCase = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowerCAmelCase = train_result.metrics
__lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase )
)
__lowerCAmelCase = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics("""train""" , _lowerCAmelCase )
trainer.save_metrics("""train""" , _lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCAmelCase = trainer.evaluate()
__lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase )
__lowerCAmelCase = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics("""eval""" , _lowerCAmelCase )
trainer.save_metrics("""eval""" , _lowerCAmelCase )
__lowerCAmelCase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCAmelCase )
else:
trainer.create_model_card(**_lowerCAmelCase )
def lowercase (_lowerCAmelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 573 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''pegasus'''
_snake_case = ['''past_key_values''']
_snake_case = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , snake_case_=50_265 , snake_case_=1_024 , snake_case_=12 , snake_case_=4_096 , snake_case_=16 , snake_case_=12 , snake_case_=4_096 , snake_case_=16 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=1_024 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=0 , snake_case_=False , snake_case_=0 , snake_case_=1 , snake_case_=1 , **snake_case_ , ) -> List[str]:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , **snake_case_ , )
@property
def A__ ( self ) -> int:
return self.encoder_attention_heads
@property
def A__ ( self ) -> int:
return self.d_model
| 573 | 1 |
import colorsys
from PIL import Image # type: ignore
def __UpperCAmelCase ( __a : float ,__a : float ,__a : int ) -> float:
"""simple docstring"""
_a : Dict = x
_a : Dict = y
for step in range(__a ): # noqa: B007
_a : Optional[Any] = a * a - b * b + x
_a : Dict = 2 * a * b + y
_a : List[str] = a_new
        # divergence happens for every complex number whose absolute value
        # exceeds 2 (equivalently, squared magnitude a*a + b*b > 4)
if a * a + b * b > 4:
break
return step / (max_step - 1)
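# Worked example (using the intended a/b iteration variables): the origin (0, 0)
# never escapes |z| > 2, so get_distance(0, 0, 50) returns 1.0 (inside the set),
# while a point such as (2, 2) diverges on the first step and returns 0.0.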
def __UpperCAmelCase ( __a : float ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def __UpperCAmelCase ( __a : float ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__a ,1 ,1 ) )
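# Hue mapping sketch: colorsys.hsv_to_rgb(0.5, 1, 1) == (0.0, 1.0, 1.0), so a
# distance of 0.5 renders as cyan (0, 255, 255), while distance 1 stays black.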
def __UpperCAmelCase ( __a : int = 800 ,__a : int = 600 ,__a : float = -0.6 ,__a : float = 0 ,__a : float = 3.2 ,__a : int = 50 ,__a : bool = True ,) -> Image.Image:
"""simple docstring"""
_a : List[str] = Image.new('''RGB''' ,(image_width, image_height) )
_a : List[Any] = img.load()
# loop through the image-coordinates
for image_x in range(__a ):
for image_y in range(__a ):
# determine the figure-coordinates based on the image-coordinates
_a : List[Any] = figure_width / image_width * image_height
_a : Dict = figure_center_x + (image_x / image_width - 0.5) * figure_width
_a : Dict = figure_center_y + (image_y / image_height - 0.5) * figure_height
_a : List[str] = get_distance(__a ,__a ,__a )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_a : Optional[Any] = get_color_coded_rgb(__a )
else:
_a : Optional[int] = get_black_and_white_rgb(__a )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a__ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "funnel"
UpperCAmelCase__ : Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self , _a=3_0_5_2_2 , _a=[4, 4, 4] , _a=None , _a=2 , _a=7_6_8 , _a=1_2 , _a=6_4 , _a=3_0_7_2 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1e-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ) -> List[Any]:
_a : Optional[int] = vocab_size
_a : Dict = block_sizes
_a : Optional[int] = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_a : int = num_decoder_layers
_a : List[str] = d_model
_a : Optional[Any] = n_head
_a : Tuple = d_head
_a : Dict = d_inner
_a : List[str] = hidden_act
_a : int = hidden_dropout
_a : Union[str, Any] = attention_dropout
_a : Tuple = activation_dropout
_a : Optional[Any] = initializer_range
_a : Dict = initializer_std
_a : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_a : Any = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_a : Optional[Any] = attention_type
_a : int = separate_cls
_a : Tuple = truncate_seq
_a : List[Any] = pool_q_only
super().__init__(**_a )
@property
def __lowercase ( self ) -> Tuple:
return sum(self.block_sizes )
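    # e.g. the default block_sizes=[4, 4, 4] yields num_hidden_layers == 12.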
@num_hidden_layers.setter
def __lowercase ( self , _a ) -> List[str]:
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __lowercase ( self ) -> Optional[int]:
return len(self.block_sizes )
@num_blocks.setter
def __lowercase ( self , _a ) -> Dict:
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 14 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : List[str] = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class __A( __UpperCAmelCase ):
__A = "deta"
__A = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self, A=None, A=900, A=2048, A=6, A=2048, A=8, A=6, A=1024, A=8, A=0.0, A=True, A="relu", A=256, A=0.1, A=0.0, A=0.0, A=0.02, A=1.0, A=True, A=False, A="sine", A=5, A=4, A=4, A=True, A=300, A=True, A=True, A=1, A=5, A=2, A=1, A=1, A=5, A=2, A=0.1, A=0.25, **A, ):
"""simple docstring"""
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(A, A ):
_UpperCamelCase = backbone_config.pop('''model_type''' )
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(A )
_UpperCamelCase = backbone_config
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
_UpperCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=A, **A )
@property
def _UpperCamelCase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _UpperCamelCase ( self ):
"""simple docstring"""
return self.d_model
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
| 716 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
lowercase : Tuple = {"""vocab_file""": """vocab.json"""}
lowercase : int = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
lowercase : int = {"""mgp-str""": 27}
class __A( __UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, A, A="[GO]", A="[GO]", A="[s]", A="[GO]", **A ):
"""simple docstring"""
super().__init__(
unk_token=A, bos_token=A, eos_token=A, pad_token=A, **A, )
with open(A, encoding='''utf-8''' ) as vocab_handle:
_UpperCamelCase = json.load(A )
_UpperCamelCase = {v: k for k, v in self.vocab.items()}
@property
def _UpperCamelCase ( self ):
"""simple docstring"""
return len(self.vocab )
def _UpperCamelCase ( self ):
"""simple docstring"""
return dict(self.vocab, **self.added_tokens_encoder )
def _UpperCamelCase ( self, A ):
"""simple docstring"""
        char_tokens = []
        for s in A:
            char_tokens.extend(s)
        return char_tokens
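    # Tokenization is purely character-level, e.g. "abc" -> ["a", "b", "c"].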
def _UpperCamelCase ( self, A ):
"""simple docstring"""
return self.vocab.get(A, self.vocab.get(self.unk_token ) )
def _UpperCamelCase ( self, A ):
"""simple docstring"""
return self.decoder.get(A )
def _UpperCamelCase ( self, A, A = None ):
"""simple docstring"""
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
_UpperCamelCase = os.path.join(
A, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(A, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab, indent=2, sort_keys=A, ensure_ascii=A ) + '''\n''' )
return (vocab_file,)
| 105 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case_ (_a ):
UpperCAmelCase__ : List[str] = (DDIMParallelScheduler,)
UpperCAmelCase__ : List[Any] = (('''eta''', 0.0), ('''num_inference_steps''', 5_0))
def lowerCamelCase__( self :List[str] ,**__snake_case :Tuple ) -> List[Any]:
a__ = {
"num_train_timesteps": 10_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def lowerCamelCase__( self :int ,**__snake_case :Optional[Any] ) -> Optional[Any]:
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config(**__lowerCAmelCase )
a__ = scheduler_class(**__lowerCAmelCase )
a__ = 10, 0.0
a__ = self.dummy_model()
a__ = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
a__ = model(__lowerCAmelCase ,__lowerCAmelCase )
a__ = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def lowerCamelCase__( self :List[str] ) -> List[str]:
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def lowerCamelCase__( self :Dict ) -> int:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config(steps_offset=1 )
a__ = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
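        # With 1_000 training steps, 5 inference steps and steps_offset=1, the stride
        # is 1_000 // 5 = 200, so the raw grid [800, 600, 400, 200, 0] shifts to the
        # [801, 601, 401, 201, 1] asserted above.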
def lowerCamelCase__( self :Any ) -> str:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def lowerCamelCase__( self :List[Any] ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def lowerCamelCase__( self :Dict ) -> Any:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def lowerCamelCase__( self :Optional[int] ) -> Optional[int]:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def lowerCamelCase__( self :Union[str, Any] ) -> int:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def lowerCamelCase__( self :Union[str, Any] ) -> Union[str, Any]:
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def lowerCamelCase__( self :Dict ) -> Tuple:
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 5_00] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def lowerCamelCase__( self :Optional[Any] ) -> str:
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def lowerCamelCase__( self :Dict ) -> List[str]:
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config()
a__ = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 ,4_00 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 ,9_60 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ,4_86 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ,9_98 ) - 0.02 ) ) < 1E-5
def lowerCamelCase__( self :Dict ) -> Optional[int]:
a__ = self.scheduler_classes[0]
a__ = self.get_scheduler_config()
a__ = scheduler_class(**__lowerCAmelCase )
a__ = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
a__ = self.dummy_model()
a__ = self.dummy_sample_deter
a__ = self.dummy_sample_deter + 0.1
a__ = self.dummy_sample_deter - 0.1
a__ = samplea.shape[0]
a__ = torch.stack([samplea, samplea, samplea] ,dim=0 )
a__ = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
a__ = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
a__ = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
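        # batch_step_no_noise applies one deterministic DDIM update to all three
        # perturbed copies at once; the sum/mean statistics below serve as a
        # regression check against precomputed reference values.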
a__ = torch.sum(torch.abs(__lowerCAmelCase ) )
a__ = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
assert abs(result_mean.item() - 0.49_82 ) < 1E-3
def lowerCamelCase__( self :str ) -> Dict:
a__ = self.full_loop()
a__ = torch.sum(torch.abs(__lowerCAmelCase ) )
a__ = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
def lowerCamelCase__( self :Any ) -> str:
a__ = self.full_loop(prediction_type='v_prediction' )
a__ = torch.sum(torch.abs(__lowerCAmelCase ) )
a__ = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1E-2
assert abs(result_mean.item() - 0.06_84 ) < 1E-3
def lowerCamelCase__( self :List[Any] ) -> Optional[int]:
a__ = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
a__ = torch.sum(torch.abs(__lowerCAmelCase ) )
a__ = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
assert abs(result_mean.item() - 0.19_51 ) < 1E-3
def lowerCamelCase__( self :str ) -> Optional[Any]:
a__ = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
a__ = torch.sum(torch.abs(__lowerCAmelCase ) )
a__ = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
assert abs(result_mean.item() - 0.19_41 ) < 1E-3
| 335 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : str = SpeechTaTokenizer(__lowerCAmelCase )
_lowerCamelCase : Tuple = AddedToken("<mask>" ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: str=20 ,__lowerCAmelCase: List[Any]=5 ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.decode(__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46 | 0 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
snake_case_ : Union[str, Any] = 'base_with_context'
def __snake_case ( weights : Tuple, model : str):
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding''']))
UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding''']), requires_grad=False)
for lyr_num, lyr in enumerate(model.encoders):
UpperCamelCase = weights[f'layers_{lyr_num}']
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale''']))
UpperCamelCase = ly_weight['''attention''']
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale''']))
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale''']))
return model
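# Flax stores Dense kernels as (in_features, out_features), so each `.T` above
# transposes them into the (out_features, in_features) layout that
# torch.nn.Linear weights expect.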
def __snake_case ( weights : Optional[Any], model : int):
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding''']), requires_grad=_UpperCAmelCase)
for lyr_num, lyr in enumerate(model.encoders):
UpperCamelCase = weights[f'layers_{lyr_num}']
UpperCamelCase = ly_weight['''attention''']
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale''']))
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T))
UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale''']))
UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale''']))
return model
def load_decoder(weights, model):
    # Copy the T5X FiLM decoder weights (self-attention, cross-attention, MLP).
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )
        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jax.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
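    # Hedged round-trip sketch (optional, assumes --save was passed): reloading
    # the saved pipeline verifies the converted weights survive the
    # save_pretrained/from_pretrained cycle.
    if args.save:
        reloaded = SpectrogramDiffusionPipeline.from_pretrained(args.output_path)
        print(f"Reloaded pipeline with decoder: {type(reloaded.decoder).__name__}")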
| 350 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        # Compute the expected (height, width) after `shortest_edge` resizing.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 350 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
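# Usage sketch (illustrative, not part of the original module): instantiating
# the config with no arguments reproduces the defaults above, which the archive
# map suggests correspond to google/canine-s.
#   config = CanineConfig()
#   config.num_hash_functions, config.num_hash_buckets  # -> 8, 16384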
| 460 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
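    # Typical invocations dispatched by this entry point (illustrative):
    #   transformers-cli env
    #   transformers-cli download bert-base-uncased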
| 460 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
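# Usage sketch (checkpoint name assumed; shown for illustration only):
#   from transformers import Pix2StructProcessor
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A caption", return_tensors="pt")
#   # -> image patches plus decoder_input_ids/decoder_attention_mask (see __call__ above)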
| 705 |
from math import sqrt
import numpy as np
from sympy import symbols
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Return v/c after checking the physical bounds on the speed."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the Lorentz boost matrix for a boost along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event=None):
    """Apply the boost to a four-vector; defaults to the symbolic (ct, x, y, z)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print('Example of four vector: ')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
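    # Extra numeric check: at v = 0.8c the Lorentz factor is
    # 1 / sqrt(1 - 0.8**2) = 5/3.
    print(f"gamma at 0.8c: {gamma(0.8 * c):.4f}")  # ~1.6667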
| 303 | 0 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 580 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    # A copied block continues while lines are indented, empty, or close a signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply black formatting to `code`, wrapping it in a dummy class if it is indented."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check that code commented as a copy in `filename` matches the original; optionally fix it."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
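    # The marker this script enforces looks like the following (illustrative):
    #   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
    # The referenced source is re-rendered, the `->` renames are applied, and
    # the result is diffed against the local copy; --fix_and_overwrite rewrites it.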
| 580 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Picklable wrapper around a schedule lambda (used above to test save/reload)."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
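if __name__ == "__main__":
    # Standalone sketch of the shape asserted above: 2 warmup steps climbing to
    # lr=10.0, then linear decay over the remaining 8 steps.
    if is_torch_available():
        demo_opt = AdamW(nn.Linear(50, 50).parameters(), lr=10.0)
        demo_sched = get_linear_schedule_with_warmup(demo_opt, num_warmup_steps=2, num_training_steps=10)
        print(unwrap_schedule(demo_sched, num_steps=10))  # [0.0, 5.0, 10.0, 8.75, ...]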
| 713 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
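    # Requires a GITHUB_TOKEN with repo scope in the environment, e.g.
    # (script path illustrative):
    #   GITHUB_TOKEN=ghp_... python utils/stale.py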
| 349 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Any , A : str=51865 , A : List[Any]=80 , A : List[Any]=6 , A : Optional[Any]=4 , A : Tuple=6 , A : Union[str, Any]=4 , A : List[Any]=1536 , A : Union[str, Any]=1536 , A : Union[str, Any]=0.0 , A : Optional[Any]=0.0 , A : Dict=50257 , A : List[str]=True , A : Optional[int]=True , A : int="gelu" , A : Optional[int]=256 , A : int=0.0 , A : Optional[int]=0.0 , A : Any=0.0 , A : str=0.02 , A : str=False , A : Optional[int]=1500 , A : Optional[int]=448 , A : List[str]=50256 , A : Optional[int]=50256 , A : List[str]=50256 , A : List[Any]=None , A : Optional[int]=[220, 50256] , A : Tuple=False , A : List[Any]=256 , A : Union[str, Any]=False , A : List[Any]=0.05 , A : Any=10 , A : int=2 , A : Tuple=0.0 , A : Dict=10 , A : str=0 , A : Union[str, Any]=7 , **A : Tuple , ):
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Optional[Any] = num_mel_bins
_UpperCAmelCase : Tuple = d_model
_UpperCAmelCase : Any = encoder_layers
_UpperCAmelCase : Optional[int] = encoder_attention_heads
_UpperCAmelCase : str = decoder_layers
_UpperCAmelCase : List[Any] = decoder_attention_heads
_UpperCAmelCase : int = decoder_ffn_dim
_UpperCAmelCase : Any = encoder_ffn_dim
_UpperCAmelCase : Optional[int] = dropout
_UpperCAmelCase : Union[str, Any] = attention_dropout
_UpperCAmelCase : int = activation_dropout
_UpperCAmelCase : Optional[Any] = activation_function
_UpperCAmelCase : Union[str, Any] = init_std
_UpperCAmelCase : Tuple = encoder_layerdrop
_UpperCAmelCase : List[str] = decoder_layerdrop
_UpperCAmelCase : Optional[Any] = use_cache
_UpperCAmelCase : Optional[int] = encoder_layers
_UpperCAmelCase : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase : str = max_source_positions
_UpperCAmelCase : Optional[int] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase : int = classifier_proj_size
_UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase : Tuple = apply_spec_augment
_UpperCAmelCase : List[str] = mask_time_prob
_UpperCAmelCase : Any = mask_time_length
_UpperCAmelCase : Optional[Any] = mask_time_min_masks
_UpperCAmelCase : Optional[Any] = mask_feature_prob
_UpperCAmelCase : List[Any] = mask_feature_length
_UpperCAmelCase : str = mask_feature_min_masks
_UpperCAmelCase : Dict = median_filter_width
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , suppress_tokens=a_ , begin_suppress_tokens=a_ , **a_ , )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
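# Usage sketch (illustrative): instantiate the config and inspect the audio
# front-end hyperparameters set above, e.g.
#   config = WhisperConfig()
#   config.num_mel_bins, config.max_source_positions  # -> 80, 1500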
| 244 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
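# Lazy-import sketch: with the structure above, importing the package stays
# cheap; the heavy torch/TF/Flax modules load only when a symbol is first
# accessed, e.g. (illustrative):
#   from transformers.models.bert import BertTokenizer   # no torch import yet
#   from transformers.models.bert import BertModel       # pulls in the torch backend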
| 610 | 0 |
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n < ~3.32e24."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 720 |
"""simple docstring"""
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
_UpperCamelCase : Any = ""
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : Dict = []
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
_UpperCamelCase : List[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
_UpperCamelCase : Optional[int] = self.__min_dist_top_down_dp(lowerCAmelCase__ , n - 1 )
_UpperCamelCase : Any = self.__min_dist_top_down_dp(m - 1 , lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
_UpperCamelCase : List[str] = 1 + min(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return self.dp[m][n]
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Tuple = worda
_UpperCamelCase : List[str] = worda
_UpperCamelCase : Union[str, Any] = [[-1 for _ in range(len(lowerCAmelCase__ ) )] for _ in range(len(lowerCAmelCase__ ) )]
return self.__min_dist_top_down_dp(len(lowerCAmelCase__ ) - 1 , len(lowerCAmelCase__ ) - 1 )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Any = worda
_UpperCamelCase : Optional[int] = worda
_UpperCamelCase : Tuple = len(lowerCAmelCase__ )
_UpperCamelCase : Tuple = len(lowerCAmelCase__ )
_UpperCamelCase : Any = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
_UpperCamelCase : List[Any] = j
elif j == 0: # second string is empty
_UpperCamelCase : Optional[int] = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
_UpperCamelCase : Tuple = self.dp[i - 1][j - 1]
else:
_UpperCamelCase : List[str] = self.dp[i][j - 1]
_UpperCamelCase : Any = self.dp[i - 1][j]
_UpperCamelCase : Optional[int] = self.dp[i - 1][j - 1]
_UpperCamelCase : Optional[Any] = 1 + min(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return self.dp[m][n]
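

# A compact, runnable sketch of the bottom-up recurrence implemented above; the
# readable names are assumptions standing in for the obfuscated identifiers.
def edit_distance_sketch(a: str, b: str) -> int:
    m, n = len(a), len(b)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0:  # first string empty: insert all of b
                dp[i][j] = j
            elif j == 0:  # second string empty: delete all of a
                dp[i][j] = i
            elif a[i - 1] == b[j - 1]:  # last characters match
                dp[i][j] = dp[i - 1][j - 1]
            else:  # insert, delete or replace
                dp[i][j] = 1 + min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1])
    return dp[m][n]

# e.g. edit_distance_sketch("kitten", "sitting") == 3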
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
_SCREAMING_SNAKE_CASE = input("""Enter the first string: """).strip()
_SCREAMING_SNAKE_CASE = input("""Enter the second string: """).strip()
print()
print(f'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}')
print(f'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}')
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 239 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowercase_ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class a_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case_( self , A , A , A = None , A = None ) -> Tuple:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
_SCREAMING_SNAKE_CASE = os.path.abspath("""examples""" )
for item in os.listdir(A ):
if item not in EXCLUDE_EXAMPLES:
_SCREAMING_SNAKE_CASE = os.path.join(A , A )
if os.path.isfile(A ) and ".py" in item_path:
with self.subTest(
tested_script=A , feature_script=A , tested_section="""main()""" if parser_only else """training_function()""" , ):
_SCREAMING_SNAKE_CASE = compare_against_test(
os.path.join(A , A ) , A , A , A )
_SCREAMING_SNAKE_CASE = """\n""".join(A )
if special_strings is not None:
for string in special_strings:
_SCREAMING_SNAKE_CASE = diff.replace(A , """""" )
self.assertEqual(A , """""" )
def snake_case_( self ) -> str:
self.one_complete_example("""complete_nlp_example.py""" , A )
self.one_complete_example("""complete_nlp_example.py""" , A )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
_SCREAMING_SNAKE_CASE = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , A , A , A )
self.one_complete_example("""complete_cv_example.py""" , A , A , A )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = False
@classmethod
def snake_case_( cls ) -> Tuple:
super().setUpClass()
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
_SCREAMING_SNAKE_CASE = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def snake_case_( cls ) -> Optional[Any]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
_SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
_SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=A )
self.assertNotIn("""epoch 0:""" , A )
self.assertIn("""epoch 1:""" , A )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
_SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=A )
if torch.cuda.is_available():
_SCREAMING_SNAKE_CASE = torch.cuda.device_count()
else:
_SCREAMING_SNAKE_CASE = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , A )
self.assertIn("""epoch 1:""" , A )
else:
self.assertIn("""epoch 0:""" , A )
self.assertIn("""epoch 1:""" , A )
@slow
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
_SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=A )
_SCREAMING_SNAKE_CASE = re.findall("""({.+})""" , A )
_SCREAMING_SNAKE_CASE = [r for r in results if """accuracy""" in r][-1]
_SCREAMING_SNAKE_CASE = ast.literal_eval(A )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def snake_case_( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
_SCREAMING_SNAKE_CASE = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(A , """tracking""" ) ) )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
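

# Hedged illustration of the launch pattern the tests above rely on: build an
# `accelerate launch` argv and run the example as a subprocess. The helper name
# below is an assumption for illustration, not part of the test suite.
import subprocess

def launch_example_sketch(script: str, *extra_args: str, config_file: str = "default_config.yml") -> None:
    cmd = ["accelerate", "launch", "--config_file", config_file, script, *extra_args]
    subprocess.run(cmd, check=True)  # raises CalledProcessError if the script fails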
| 314 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ = TypeVar("""T""")
class a_ ( Generic[T] ):
'''simple docstring'''
def __init__( self , A , A ) -> None:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = len(A )
_SCREAMING_SNAKE_CASE = [any_type for _ in range(self.N )] + arr
_SCREAMING_SNAKE_CASE = fnc
self.build()
def snake_case_( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
_SCREAMING_SNAKE_CASE = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def snake_case_( self , A , A ) -> None:
p += self.N
_SCREAMING_SNAKE_CASE = v
while p > 1:
_SCREAMING_SNAKE_CASE = p // 2
_SCREAMING_SNAKE_CASE = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def snake_case_( self , A , A ) -> T | None: # noqa: E741
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = l + self.N, r + self.N
_SCREAMING_SNAKE_CASE = None
while l <= r:
if l % 2 == 1:
_SCREAMING_SNAKE_CASE = self.st[l] if res is None else self.fn(A , self.st[l] )
if r % 2 == 0:
_SCREAMING_SNAKE_CASE = self.st[r] if res is None else self.fn(A , self.st[r] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (l + 1) // 2, (r - 1) // 2
return res
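

# A self-contained sketch of the same bottom-up layout used above: leaves live at
# indices N..2N-1, the parent of node i is i // 2, and an [l, r] query walks both
# borders upward. The readable names are assumptions, not the original identifiers.
def build_sketch(arr: list, fn) -> list:
    n = len(arr)
    st = [None] * n + list(arr)
    for p in range(n - 1, 0, -1):
        st[p] = fn(st[2 * p], st[2 * p + 1])
    return st

def query_sketch(st: list, n: int, l: int, r: int, fn):
    l, r = l + n, r + n
    res = None
    while l <= r:
        if l % 2 == 1:  # right child: its parent also covers l - 1, so take st[l] itself
            res = st[l] if res is None else fn(res, st[l])
        if r % 2 == 0:  # left child: its parent also covers r + 1, so take st[r] itself
            res = st[r] if res is None else fn(res, st[r])
        l, r = (l + 1) // 2, (r - 1) // 2
    return res

# e.g. st = build_sketch([5, 2, 8, 1], min); query_sketch(st, 4, 1, 2, min) == 2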
if __name__ == "__main__":
from functools import reduce
lowercase_ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
lowercase_ = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
lowercase_ = SegmentTree(test_array, min)
lowercase_ = SegmentTree(test_array, max)
lowercase_ = SegmentTree(test_array, lambda a, b: a + b)
def lowerCamelCase ( ) ->None:
for i in range(len(__lowerCamelCase ) ):
for j in range(__lowerCamelCase , len(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE = reduce(__lowerCamelCase , test_array[i : j + 1] )
_SCREAMING_SNAKE_CASE = reduce(__lowerCamelCase , test_array[i : j + 1] )
_SCREAMING_SNAKE_CASE = reduce(lambda __lowerCamelCase , __lowerCamelCase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__lowerCamelCase , __lowerCamelCase )
assert max_range == max_segment_tree.query(__lowerCamelCase , __lowerCamelCase )
assert sum_range == sum_segment_tree.query(__lowerCamelCase , __lowerCamelCase )
test_all_segments()
for index, value in test_updates.items():
lowercase_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 314 | 1 |
import re
def _UpperCAmelCase ( UpperCamelCase: str ):
"""simple docstring"""
if len(re.findall("[ATCG]" , UpperCamelCase ) ) != len(UpperCamelCase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
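

# A hedged, self-contained restatement of the rule above (the function name is
# an assumption): the complement maps A<->T and C<->G after validating the strand.
def dna_complement_sketch(strand: str) -> str:
    if any(base not in "ATCG" for base in strand):
        raise ValueError("Invalid Strand")
    return strand.translate(str.maketrans("ATCG", "TAGC"))

# e.g. dna_complement_sketch("GTAT") == "CATA"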
if __name__ == "__main__":
import doctest
doctest.testmod()
| 376 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class a :
def __init__( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
__lowerCAmelCase = data
__lowerCAmelCase = None
class a :
def __init__( self : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = None
__lowerCAmelCase = None
def __iter__( self : Dict ):
"""simple docstring"""
__lowerCAmelCase = self.head
while self.head:
yield node.data
__lowerCAmelCase = node.next
if node == self.head:
break
def __len__( self : Dict ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self : str ):
"""simple docstring"""
return "->".join(str(snake_case__ ) for item in iter(self ) )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Any ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Any ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : int , snake_case__ : Any ):
"""simple docstring"""
if index < 0 or index > len(self ):
raise IndexError("list index out of range." )
__lowerCAmelCase = Node(snake_case__ )
if self.head is None:
__lowerCAmelCase = new_node # first node points itself
__lowerCAmelCase = __lowerCAmelCase = new_node
elif index == 0: # insert at head
__lowerCAmelCase = self.head
__lowerCAmelCase = __lowerCAmelCase = new_node
else:
__lowerCAmelCase = self.head
for _ in range(index - 1 ):
__lowerCAmelCase = temp.next
__lowerCAmelCase = temp.next
__lowerCAmelCase = new_node
if index == len(self ) - 1: # insert at tail
__lowerCAmelCase = new_node
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
return self.delete_nth(0 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def UpperCAmelCase__ ( self : str , snake_case__ : int = 0 ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
__lowerCAmelCase = self.head
if self.head == self.tail: # just one node
__lowerCAmelCase = __lowerCAmelCase = None
elif index == 0: # delete head node
__lowerCAmelCase = self.tail.next.next
__lowerCAmelCase = self.head.next
else:
__lowerCAmelCase = self.head
for _ in range(index - 1 ):
__lowerCAmelCase = temp.next
__lowerCAmelCase = temp.next
__lowerCAmelCase = temp.next.next
if index == len(self ) - 1: # delete at tail
__lowerCAmelCase = temp
return delete_node.data
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
return len(self ) == 0
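

# A tiny hedged sketch of the circular invariant the tests below exercise: in a
# non-empty circular singly linked list `tail.next is head`, so following
# `.next` from the head for len(list) steps returns to the head.
def is_circular_sketch(head, length: int) -> bool:
    node = head
    for _ in range(length):
        node = node.next
    return node is head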
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCAmelCase = CircularLinkedList()
assert len(UpperCamelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(UpperCamelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(UpperCamelCase ) == i
circular_linked_list.insert_nth(UpperCamelCase , i + 1 )
assert str(UpperCamelCase ) == "->".join(str(UpperCamelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(UpperCamelCase ) == "->".join(str(UpperCamelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(UpperCamelCase ) == "->".join(str(UpperCamelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(UpperCamelCase ) == "->".join(str(UpperCamelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(UpperCamelCase ) == "->".join(str(UpperCamelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 376 | 1 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __A ( a_ :int) -> Union[str, Any]:
random.seed(a_)
np.random.seed(a_)
torch.manual_seed(a_)
torch.cuda.manual_seed_all(a_)
# ^^ safe to call this function even if cuda is not available
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = 0.9_9_9_9 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 0 , _UpperCAmelCase = False , _UpperCAmelCase = 1.0 , _UpperCAmelCase = 2 / 3 , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
if isinstance(_UpperCAmelCase , torch.nn.Module ):
__a : Optional[Any] = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , )
__a : List[Any] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__a : List[Any] = True
if kwargs.get('''max_value''' , _UpperCAmelCase ) is not None:
__a : int = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
__a : Optional[int] = kwargs['''max_value''']
if kwargs.get('''min_value''' , _UpperCAmelCase ) is not None:
__a : Tuple = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
__a : Dict = kwargs['''min_value''']
__a : Union[str, Any] = list(_UpperCAmelCase )
__a : Optional[Any] = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , _UpperCAmelCase ) is not None:
__a : Dict = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , _UpperCAmelCase , standard_warn=_UpperCAmelCase )
self.to(device=kwargs['''device'''] )
__a : Union[str, Any] = None
__a : Dict = decay
__a : Tuple = min_decay
__a : List[Any] = update_after_step
__a : Any = use_ema_warmup
__a : int = inv_gamma
__a : int = power
__a : Optional[Any] = 0
__a : Any = None # set in `step()`
__a : Tuple = model_cls
__a : Union[str, Any] = model_config
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , _UpperCAmelCase ):
__a , __a : Optional[int] = model_cls.load_config(_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase )
__a : Any = model_cls.from_pretrained(_UpperCAmelCase )
__a : int = cls(model.parameters() , model_cls=_UpperCAmelCase , model_config=model.config )
ema_model.load_state_dict(_UpperCAmelCase )
return ema_model
def _lowerCamelCase ( self , _UpperCAmelCase ):
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
__a : Tuple = self.model_cls.from_config(self.model_config )
__a : Any = self.state_dict()
state_dict.pop('''shadow_params''' , _UpperCAmelCase )
model.register_to_config(**_UpperCAmelCase )
self.copy_to(model.parameters() )
model.save_pretrained(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Tuple = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
__a : Union[str, Any] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__a : Tuple = (1 + step) / (10 + step)
__a : Any = min(_UpperCAmelCase , self.decay )
# make sure decay is not smaller than min_decay
__a : Tuple = max(_UpperCAmelCase , self.min_decay )
return cur_decay_value
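    # Note on the schedule above: with warmup the decay follows
    #   1 - (1 + step / inv_gamma) ** (-power)
    # and otherwise (1 + step) / (10 + step); either value is then clamped
    # into [min_decay, decay] by the min/max lines just before the return.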
@torch.no_grad()
def _lowerCamelCase ( self , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , torch.nn.Module ):
__a : Any = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , )
__a : Union[str, Any] = parameters.parameters()
__a : List[Any] = list(_UpperCAmelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__a : List[str] = self.get_decay(self.optimization_step )
__a : List[str] = decay
__a : Dict = 1 - decay
__a : int = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _UpperCAmelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
__a : Tuple = deepspeed.zero.GatheredParameters(_UpperCAmelCase , modifier_rank=_UpperCAmelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : List[Any] = list(_UpperCAmelCase )
for s_param, param in zip(self.shadow_params , _UpperCAmelCase ):
param.data.copy_(s_param.to(param.device ).data )
def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__a : Tuple = [
p.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ) if p.is_floating_point() else p.to(device=_UpperCAmelCase )
for p in self.shadow_params
]
def _lowerCamelCase ( self ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Dict = [param.detach().cpu().clone() for param in parameters]
def _lowerCamelCase ( self , _UpperCAmelCase ):
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , _UpperCAmelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
__a : Optional[int] = None
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : List[str] = copy.deepcopy(_UpperCAmelCase )
__a : Optional[Any] = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
__a : Any = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , _UpperCAmelCase ):
raise ValueError('''Invalid min_decay''' )
__a : Optional[int] = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , _UpperCAmelCase ):
raise ValueError('''Invalid optimization_step''' )
__a : Union[str, Any] = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , _UpperCAmelCase ):
raise ValueError('''Invalid update_after_step''' )
__a : Dict = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _UpperCAmelCase ):
raise ValueError('''Invalid use_ema_warmup''' )
__a : Tuple = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
__a : Optional[int] = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
__a : int = state_dict.get('''shadow_params''' , _UpperCAmelCase )
if shadow_params is not None:
__a : str = shadow_params
if not isinstance(self.shadow_params , _UpperCAmelCase ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(_UpperCAmelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' ) | 52 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __A ( a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Optional[Any] , a_ :Optional[int]=5) -> List[Any]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('''<mask>''') == 1
__a : Optional[Any] = torch.tensor(tokenizer.encode(a_ , add_special_tokens=a_)).unsqueeze(0) # Batch size 1
__a : Dict = model(a_)[0] # The last hidden-state is the first element of the output tuple
__a : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__a : Any = logits[0, masked_index, :]
__a : Any = logits.softmax(dim=0)
__a , __a : Optional[Any] = prob.topk(k=a_ , dim=0)
__a : Optional[int] = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(a_))])
__a : List[str] = tokenizer.mask_token
__a : Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')):
__a : Optional[Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''')
if " {0}".format(a_) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(a_) , a_),
values[index].item(),
predicted_token,
))
else:
topk_filled_outputs.append(
(
masked_input.replace(a_ , a_),
values[index].item(),
predicted_token,
))
return topk_filled_outputs
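

# Hedged mini-sketch of the top-k selection step above, isolated from the model
# (the helper name is an assumption): softmax over the mask position's logits,
# then take the k most probable token ids.
def topk_probs_sketch(mask_logits, k: int = 5):
    probs = mask_logits.softmax(dim=0)
    return probs.topk(k=k, dim=0)  # (values, indices), each of length k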
A = CamembertTokenizer.from_pretrained('''camembert-base''')
A = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3)) | 52 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
snake_case_ : Tuple = logging.getLogger(__name__)
torch.set_grad_enabled(False)
snake_case_ : List[str] = "cuda" if torch.cuda.is_available() else "cpu"
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : List[str]=100, SCREAMING_SNAKE_CASE__ : Union[str, Any]=" " ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = text.split(SCREAMING_SNAKE_CASE__ )
return [character.join(text[i : i + n] ).strip() for i in range(0, len(SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__ )]
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : dict ) -> dict:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = [], []
for title, text in zip(documents['''title'''], documents['''text'''] ):
if text is not None:
for passage in split_text(SCREAMING_SNAKE_CASE__ ):
titles.append(title if title is not None else '''''' )
texts.append(SCREAMING_SNAKE_CASE__ )
return {"title": titles, "text": texts}
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : dict, SCREAMING_SNAKE_CASE__ : DPRContextEncoder, SCREAMING_SNAKE_CASE__ : DPRContextEncoderTokenizerFast ) -> dict:
UpperCAmelCase_ : int = ctx_tokenizer(
documents['''title'''], documents['''text'''], truncation=SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' )['''input_ids''']
UpperCAmelCase_ : int = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE__ ), return_dict=SCREAMING_SNAKE_CASE__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : "RagExampleArguments", SCREAMING_SNAKE_CASE__ : "ProcessingArguments", SCREAMING_SNAKE_CASE__ : "IndexHnswArguments", ) -> Optional[int]:
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCAmelCase_ : int = load_dataset(
'''csv''', data_files=[rag_example_args.csv_path], split='''train''', delimiter='''\t''', column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCAmelCase_ : List[str] = dataset.map(SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCAmelCase_ : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCAmelCase_ : List[str] = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
UpperCAmelCase_ : Optional[int] = dataset.map(
partial(SCREAMING_SNAKE_CASE__, ctx_encoder=SCREAMING_SNAKE_CASE__, ctx_tokenizer=SCREAMING_SNAKE_CASE__ ), batched=SCREAMING_SNAKE_CASE__, batch_size=processing_args.batch_size, features=SCREAMING_SNAKE_CASE__, )
# And finally save your dataset
UpperCAmelCase_ : Tuple = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset''' )
dataset.save_to_disk(SCREAMING_SNAKE_CASE__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCAmelCase_ : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''', custom_index=SCREAMING_SNAKE_CASE__ )
# And save the index
UpperCAmelCase_ : str = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(SCREAMING_SNAKE_CASE__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
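

# Hedged mini-example of the chunking helper above (the readable names are
# assumptions): split on the separator and regroup into n-word passages.
def split_words_sketch(text: str, n: int = 100, sep: str = " ") -> list:
    words = text.split(sep)
    return [sep.join(words[i : i + n]).strip() for i in range(0, len(words), n)]

# e.g. split_words_sketch("a b c d e", n=2) == ["a b", "c d", "e"]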
@dataclass
class __a :
__a : str = field(
default=str(Path(lowerCamelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
__a : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
__a : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
__a : Optional[str] = field(
default=str(Path(lowerCamelCase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class __a :
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
__a : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class __a :
__a : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
__a : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
snake_case_ : int = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
snake_case_ ,snake_case_ ,snake_case_ : Optional[int] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
snake_case_ : Optional[Any] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 644 |
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
return abs(SCREAMING_SNAKE_CASE__ ) if a == 0 else greatest_common_divisor(b % a, SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
    while y:  # when y == 0 the loop terminates and x holds the final GCD
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = y, x % y
return abs(SCREAMING_SNAKE_CASE__ )
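

# A self-contained hedged sketch of the iterative Euclid loop above:
def gcd_sketch(x: int, y: int) -> int:
    while y:
        x, y = y, x % y
    return abs(x)

# e.g. gcd_sketch(24, 40) == 8 and gcd_sketch(-3, 9) == 3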
def lowerCamelCase_ ( ) -> Optional[int]:
try:
UpperCAmelCase_ : Optional[Any] = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
UpperCAmelCase_ : Optional[int] = int(nums[0] )
UpperCAmelCase_ : List[Any] = int(nums[1] )
print(
F"""greatest_common_divisor({num_a}, {num_a}) = """
F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
| 644 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :Union[str, Any] , snake_case__ :int=1024 , snake_case__ :List[str]=1024 , snake_case__ :int=False , **snake_case__ :Tuple ) -> Tuple:
_lowercase = AutoTokenizer.from_pretrained(snake_case__ )
_lowercase = SeqaSeqDataset(snake_case__ , snake_case__ , snake_case__ , snake_case__ , type_path='train' , **snake_case__ )
_lowercase = tok.pad_token_id
def get_lens(snake_case__ :Optional[Any] ):
_lowercase = tqdm(
DataLoader(snake_case__ , batch_size=512 , num_workers=8 , shuffle=snake_case__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_lowercase = []
for batch in dl:
_lowercase = batch['input_ids'].ne(snake_case__ ).sum(1 ).tolist()
_lowercase = batch['labels'].ne(snake_case__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(snake_case__ , snake_case__ ):
max_lens.append(max(snake_case__ , snake_case__ ) )
else:
max_lens.extend(snake_case__ )
return max_lens
_lowercase = get_lens(snake_case__ )
_lowercase = SeqaSeqDataset(snake_case__ , snake_case__ , snake_case__ , snake_case__ , type_path='val' , **snake_case__ )
_lowercase = get_lens(snake_case__ )
pickle_save(snake_case__ , train_ds.len_file )
pickle_save(snake_case__ , val_ds.len_file )
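

# Hedged mini-sketch of the per-example length computation above (the helper
# name is an assumption): count the non-pad tokens in each row of a padded batch.
import torch

def seq_lens_sketch(input_ids: "torch.Tensor", pad_token_id: int) -> list:
    return input_ids.ne(pad_token_id).sum(1).tolist()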
if __name__ == "__main__":
fire.Fire(save_len_file) | 67 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
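

# Hedged standalone sketch of the dynamic-axes mapping the ONNX config below
# builds for the seq2seq task (the helper name is an assumption):
def seq2seq_onnx_inputs_sketch(use_past: bool = False):
    inputs = OrderedDict(
        [
            ("input_ids", {0: "batch", 1: "encoder_sequence"}),
            ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
        ]
    )
    if use_past:
        inputs["decoder_input_ids"] = {0: "batch"}
        inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
    else:
        inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
    return inputs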
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A ) | 67 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
_lowerCAmelCase : Union[str, Any] = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def UpperCAmelCase_ ( snake_case__ ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = list(s_dict.keys() )
for key in keys:
lowerCAmelCase__ = R'.*/layers_(\d+)'
lowerCAmelCase__ = key
if re.match(snake_case__ , snake_case__ ):
lowerCAmelCase__ = re.sub(R'layers_(\d+)' , R'block/\1/layer' , snake_case__ )
lowerCAmelCase__ = R'(encoder|decoder)\/'
if re.match(snake_case__ , snake_case__ ):
lowerCAmelCase__ = re.match(snake_case__ , snake_case__ ).groups()
if groups[0] == "encoder":
lowerCAmelCase__ = re.sub(R'/mlp/' , R'/1/mlp/' , snake_case__ )
lowerCAmelCase__ = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , snake_case__ )
elif groups[0] == "decoder":
lowerCAmelCase__ = re.sub(R'/mlp/' , R'/2/mlp/' , snake_case__ )
lowerCAmelCase__ = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , snake_case__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCAmelCase__ = new_key.replace(snake_case__ , snake_case__ )
print(f'{key} -> {new_key}' )
lowerCAmelCase__ = s_dict.pop(snake_case__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase__ = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase__ = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCAmelCase__ = s_dict[key].shape[0]
lowerCAmelCase__ = s_dict[key]
for idx in range(snake_case__ ):
lowerCAmelCase__ = expert_weihts[idx]
                print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
s_dict.pop(snake_case__ )
return s_dict
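

# Hedged mini-sketch of the layer-renaming step above: rewrite T5X-style
# "layers_<i>" key segments into "block/<i>/layer" with a regex substitution.
def rename_layer_key_sketch(key: str) -> str:
    return re.sub(r"layers_(\d+)", r"block/\1/layer", key)

# e.g. rename_layer_key_sketch("encoder/layers_3/mlp") == "encoder/block/3/layer/mlp"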
_lowerCAmelCase : Union[str, Any] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> List[str]:
"""simple docstring"""
import regex as re
with open(snake_case__ , 'r' ) as f:
lowerCAmelCase__ = f.read()
lowerCAmelCase__ = re.findall(R'(.*) = ([0-9.]*)' , snake_case__ )
lowerCAmelCase__ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCAmelCase__ = float(snake_case__ ) if '.' in value else int(snake_case__ )
lowerCAmelCase__ = re.findall(R'(.*activations) = \(\'(.*)\',\)' , snake_case__ )[0]
lowerCAmelCase__ = str(activation[1] )
lowerCAmelCase__ = num_experts
lowerCAmelCase__ = SwitchTransformersConfig(**snake_case__ )
return config
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__="./" , snake_case__=8 ) -> List[str]:
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
lowerCAmelCase__ = checkpoints.load_tax_checkpoint(snake_case__ )
if gin_file is not None:
lowerCAmelCase__ = convert_gin_to_config(snake_case__ , snake_case__ )
else:
lowerCAmelCase__ = SwitchTransformersConfig.from_pretrained(snake_case__ )
lowerCAmelCase__ = SwitchTransformersForConditionalGeneration(snake_case__ )
lowerCAmelCase__ = flax_params['target']
lowerCAmelCase__ = flatten_dict(snake_case__ , sep='/' )
lowerCAmelCase__ = rename_keys(snake_case__ )
lowerCAmelCase__ = unflatten_dict(snake_case__ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 604 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __snake_case ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = 42
class __snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self ,a_ = 16 ,a_ = 88 ,a_ = None ,a_ = None ,a_ = 1 ,a_ = 0.0 ,a_ = 32 ,a_ = None ,a_ = False ,a_ = None ,a_ = "geglu" ,a_ = True ,a_ = True ,):
"""simple docstring"""
super().__init__()
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = attention_head_dim
lowerCAmelCase__ = num_attention_heads * attention_head_dim
lowerCAmelCase__ = in_channels
lowerCAmelCase__ = torch.nn.GroupNorm(num_groups=a_ ,num_channels=a_ ,eps=1e-6 ,affine=a_ )
lowerCAmelCase__ = nn.Linear(a_ ,a_ )
# 3. Define transformers blocks
lowerCAmelCase__ = nn.ModuleList(
[
BasicTransformerBlock(
a_ ,a_ ,a_ ,dropout=a_ ,cross_attention_dim=a_ ,activation_fn=a_ ,attention_bias=a_ ,double_self_attention=a_ ,norm_elementwise_affine=a_ ,)
for d in range(a_ )
] )
lowerCAmelCase__ = nn.Linear(a_ ,a_ )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=None ,a_=None ,a_=None ,a_=1 ,a_=None ,a_ = True ,):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = hidden_states.shape
lowerCAmelCase__ = batch_frames // num_frames
lowerCAmelCase__ = hidden_states
lowerCAmelCase__ = hidden_states[None, :].reshape(a_ ,a_ ,a_ ,a_ ,a_ )
lowerCAmelCase__ = hidden_states.permute(0 ,2 ,1 ,3 ,4 )
lowerCAmelCase__ = self.norm(a_ )
lowerCAmelCase__ = hidden_states.permute(0 ,3 ,4 ,2 ,1 ).reshape(batch_size * height * width ,a_ ,a_ )
lowerCAmelCase__ = self.proj_in(a_ )
# 2. Blocks
for block in self.transformer_blocks:
lowerCAmelCase__ = block(
a_ ,encoder_hidden_states=a_ ,timestep=a_ ,cross_attention_kwargs=a_ ,class_labels=a_ ,)
# 3. Output
lowerCAmelCase__ = self.proj_out(a_ )
lowerCAmelCase__ = (
hidden_states[None, None, :]
.reshape(a_ ,a_ ,a_ ,a_ ,a_ )
.permute(0 ,3 ,4 ,1 ,2 )
.contiguous()
)
lowerCAmelCase__ = hidden_states.reshape(a_ ,a_ ,a_ ,a_ )
lowerCAmelCase__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=a_ )
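

# Hedged standalone sketch of the frame/token reshape in the forward pass above
# (dimension names are assumptions): a (batch * frames, C, H, W) tensor is
# regrouped so attention runs over the frame axis at every spatial position.
def frames_to_tokens_sketch(hidden: torch.Tensor, num_frames: int) -> torch.Tensor:
    bf, c, h, w = hidden.shape
    b = bf // num_frames
    x = hidden.reshape(b, num_frames, c, h, w).permute(0, 2, 1, 3, 4)  # (b, c, f, h, w)
    return x.permute(0, 3, 4, 2, 1).reshape(b * h * w, num_frames, c)  # (b*h*w, f, c)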
| 604 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = TFAutoModel.from_pretrained(a , from_pt=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = AutoModel.from_pretrained(a , from_tf=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
@slow
def _UpperCamelCase ( self ) -> Any:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = TFAutoModelForPreTraining.from_pretrained(a , from_pt=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = AutoModelForPreTraining.from_pretrained(a , from_tf=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
@slow
def _UpperCamelCase ( self ) -> Any:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = TFAutoModelForCausalLM.from_pretrained(a , from_pt=a )
snake_case_ , snake_case_ = TFAutoModelForCausalLM.from_pretrained(
a , output_loading_info=a , from_pt=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = AutoModelForCausalLM.from_pretrained(a , from_tf=a )
snake_case_ , snake_case_ = AutoModelForCausalLM.from_pretrained(
a , output_loading_info=a , from_tf=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
@slow
def _UpperCamelCase ( self ) -> Dict:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = TFAutoModelWithLMHead.from_pretrained(a , from_pt=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = AutoModelWithLMHead.from_pretrained(a , from_tf=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
@slow
def _UpperCamelCase ( self ) -> str:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = TFAutoModelForMaskedLM.from_pretrained(a , from_pt=a )
snake_case_ , snake_case_ = TFAutoModelForMaskedLM.from_pretrained(
a , output_loading_info=a , from_pt=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = AutoModelForMaskedLM.from_pretrained(a , from_tf=a )
snake_case_ , snake_case_ = AutoModelForMaskedLM.from_pretrained(
a , output_loading_info=a , from_tf=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
@slow
def _UpperCamelCase ( self ) -> Optional[Any]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = AutoConfig.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained(a , from_pt=a )
snake_case_ , snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained(
a , output_loading_info=a , from_pt=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = AutoModelForSeqaSeqLM.from_pretrained(a , from_tf=a )
snake_case_ , snake_case_ = AutoModelForSeqaSeqLM.from_pretrained(
a , output_loading_info=a , from_tf=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
@slow
def _UpperCamelCase ( self ) -> List[str]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = TFAutoModelForSequenceClassification.from_pretrained(a , from_pt=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = AutoModelForSequenceClassification.from_pretrained(a , from_tf=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
@slow
def _UpperCamelCase ( self ) -> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case_ = AutoConfig.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = TFAutoModelForQuestionAnswering.from_pretrained(a , from_pt=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
snake_case_ = AutoModelForQuestionAnswering.from_pretrained(a , from_tf=a )
self.assertIsNotNone(a )
self.assertIsInstance(a , a )
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = TFAutoModelWithLMHead.from_pretrained(a , from_pt=a )
self.assertIsInstance(a , a )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a ) , 1_44_10 )
snake_case_ = AutoModelWithLMHead.from_pretrained(a , from_tf=a )
self.assertIsInstance(a , a )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a ) , 1_44_10 )
def _UpperCamelCase ( self ) -> str:
snake_case_ = TFAutoModelWithLMHead.from_pretrained(a , from_pt=a )
self.assertIsInstance(a , a )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a ) , 1_44_10 )
snake_case_ = AutoModelWithLMHead.from_pretrained(a , from_tf=a )
self.assertIsInstance(a , a )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=a ) , 1_44_10 )
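# Hedged illustration of the PT<->TF round-trip these tests exercise; the
# calls below are the public transformers API and the model name is just an
# example:
#
#     tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
#     pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)
#
# `from_pt=True` converts a PyTorch checkpoint into TF weights on the fly, and
# `from_tf=True` does the reverse, which is exactly what each assertion pair
# above verifies.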
| 198 |
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a ) -> Tuple:
snake_case_ = n
snake_case_ = [None] * self.n
snake_case_ = 0 # index of the first element
snake_case_ = 0
snake_case_ = 0
def __len__( self ) -> int:
return self.size
def _UpperCamelCase ( self ) -> bool:
return self.size == 0
def _UpperCamelCase ( self ) -> List[Any]:
return False if self.is_empty() else self.array[self.front]
def _UpperCamelCase ( self , a ) -> Dict:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
snake_case_ = data
snake_case_ = (self.rear + 1) % self.n
self.size += 1
return self
def _UpperCamelCase ( self ) -> List[Any]:
if self.size == 0:
raise Exception('UNDERFLOW' )
snake_case_ = self.array[self.front]
snake_case_ = None
snake_case_ = (self.front + 1) % self.n
self.size -= 1
return temp
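# Self-contained illustration of the wrap-around indexing used above (our own
# names, not the snippet's): front and rear both advance as (i + 1) % n, so
# slots freed by a dequeue are reused instead of the backing list growing.
if __name__ == "__main__":
    n = 3
    rear = 0
    positions = []
    for _ in range(5):  # trace five successive rear positions
        positions.append(rear)
        rear = (rear + 1) % n
    print(positions)  # [0, 1, 2, 0, 1] -- indices wrap around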
| 198 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCamelCase : List[str] = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class a ( a_ ):
UpperCAmelCase_ : List[str] ="blip_2_vision_model"
def __init__( self , _lowerCamelCase=1_4_0_8 , _lowerCamelCase=6_1_4_4 , _lowerCamelCase=3_9 , _lowerCamelCase=1_6 , _lowerCamelCase=2_2_4 , _lowerCamelCase=1_4 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0_0_0_0_1 , _lowerCamelCase=0.0 , _lowerCamelCase=1e-10 , _lowerCamelCase=True , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
lowercase = hidden_size
lowercase = intermediate_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = patch_size
lowercase = image_size
lowercase = initializer_range
lowercase = attention_dropout
lowercase = layer_norm_eps
lowercase = hidden_act
lowercase = qkv_bias
@classmethod
def UpperCamelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ):
cls._set_token_in_kwargs(_lowerCamelCase )
lowercase , lowercase = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
lowercase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class a ( a_ ):
UpperCAmelCase_ : Optional[int] ="blip_2_qformer"
def __init__( self , _lowerCamelCase=3_0_5_2_2 , _lowerCamelCase=7_6_8 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=0 , _lowerCamelCase="absolute" , _lowerCamelCase=2 , _lowerCamelCase=1_4_0_8 , **_lowerCamelCase , ):
super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = cross_attention_frequency
lowercase = encoder_hidden_size
@classmethod
def UpperCamelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ):
cls._set_token_in_kwargs(_lowerCamelCase )
lowercase , lowercase = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
lowercase = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class a ( a_ ):
UpperCAmelCase_ : Optional[int] ="blip-2"
UpperCAmelCase_ : Optional[Any] =True
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=3_2 , **_lowerCamelCase ):
super().__init__(**_lowerCamelCase )
if vision_config is None:
lowercase = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
lowercase = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
lowercase = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
lowercase = BlipaVisionConfig(**_lowerCamelCase )
lowercase = BlipaQFormerConfig(**_lowerCamelCase )
lowercase = text_config['model_type'] if 'model_type' in text_config else 'opt'
lowercase = CONFIG_MAPPING[text_model_type](**_lowerCamelCase )
lowercase = self.text_config.tie_word_embeddings
lowercase = self.text_config.is_encoder_decoder
lowercase = num_query_tokens
lowercase = self.vision_config.hidden_size
lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowercase = 1.0
lowercase = 0.0_2
@classmethod
def UpperCamelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowerCamelCase , )
def UpperCamelCase_ ( self ):
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.vision_config.to_dict()
lowercase = self.qformer_config.to_dict()
lowercase = self.text_config.to_dict()
lowercase = self.__class__.model_type
return output
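# Hedged usage sketch, runnable outside this module, using the public
# transformers names (Blip2Config and friends); the exact default values are
# assumptions, not taken from the snippet above.
if __name__ == "__main__":
    from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

    demo = Blip2Config.from_vision_qformer_text_configs(
        vision_config=Blip2VisionConfig(),
        qformer_config=Blip2QFormerConfig(),
        text_config=OPTConfig(),
    )
    # to_dict() nests each sub-config as a plain dict, mirroring the method above
    assert "vision_config" in demo.to_dict()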
| 134 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase : List[Any] = logging.get_logger(__name__)
_UpperCamelCase : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_UpperCamelCase : int = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
_UpperCamelCase : Optional[int] = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowercase = bs[:]
lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__snake_case )
cs.append(2**8 + n )
n += 1
lowercase = [chr(__snake_case ) for n in cs]
return dict(zip(__snake_case , __snake_case ) )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] ):
'''simple docstring'''
lowercase = set()
lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase = char
return pairs
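# Self-contained illustration of the pair-extraction step above (a tiny copy
# under our own name, since the obfuscated definitions shadow each other): the
# BPE loop below ranks these adjacent bigrams against bpe_ranks and merges the
# best-ranked one each iteration.
def _demo_get_pairs(word):
    pairs, prev_char = set(), word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert _demo_get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}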
class a ( a_ ):
UpperCAmelCase_ : Dict =VOCAB_FILES_NAMES
UpperCAmelCase_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Union[str, Any] =["input_ids", "attention_mask"]
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , **_lowerCamelCase , ):
lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else bos_token
lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else eos_token
lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else sep_token
lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else cls_token
lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else unk_token
lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
super().__init__(
errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , **_lowerCamelCase , )
with open(_lowerCamelCase , encoding='utf-8' ) as vocab_handle:
lowercase = json.load(_lowerCamelCase )
lowercase = {v: k for k, v in self.encoder.items()}
lowercase = errors # how to handle errors in decoding
lowercase = bytes_to_unicode()
lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCamelCase , encoding='utf-8' ) as merges_handle:
lowercase = merges_handle.read().split('\n' )[1:-1]
lowercase = [tuple(merge.split() ) for merge in bpe_merges]
lowercase = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
lowercase = {}
lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCamelCase_ ( self ):
return len(self.encoder )
def UpperCamelCase_ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase_ ( self , _lowerCamelCase ):
if token in self.cache:
return self.cache[token]
lowercase = tuple(_lowerCamelCase )
lowercase = get_pairs(_lowerCamelCase )
if not pairs:
return token
while True:
lowercase = min(_lowerCamelCase , key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase = bigram
lowercase = []
lowercase = 0
while i < len(_lowerCamelCase ):
try:
lowercase = word.index(_lowerCamelCase , _lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase = j
if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase = tuple(_lowerCamelCase )
lowercase = new_word
if len(_lowerCamelCase ) == 1:
break
else:
lowercase = get_pairs(_lowerCamelCase )
lowercase = ' '.join(_lowerCamelCase )
lowercase = word
return word
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = []
for token in re.findall(self.pat , _lowerCamelCase ):
lowercase = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(' ' ) )
return bpe_tokens
def UpperCamelCase_ ( self , _lowerCamelCase ):
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) )
def UpperCamelCase_ ( self , _lowerCamelCase ):
return self.decoder.get(_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = ''.join(_lowerCamelCase )
lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCamelCase , ensure_ascii=_lowerCamelCase ) + '\n' )
lowercase = 0
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
lowercase = token_index
writer.write(' '.join(_lowerCamelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase ):
lowercase = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()):
lowercase = ' ' + text
return (text, kwargs)
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
return token_ids_a + [self.eos_token_id]
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(_lowerCamelCase )
lowercase = ' '.join(_lowerCamelCase )
lowercase = self.encode(_lowerCamelCase )
if len(_lowerCamelCase ) > self.model_max_length:
lowercase = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
| 134 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=10 , _UpperCamelCase=3 , _UpperCamelCase=2 , _UpperCamelCase=2 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=10 , _UpperCamelCase=0.02 , _UpperCamelCase="divided_space_time" , _UpperCamelCase=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_frames
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = attention_type
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
_UpperCAmelCase = num_labels
# in TimeSformer, the number of tokens equals num_frames * num_patches per frame + 1 CLS token
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = (num_frames) * self.num_patches_per_frame + 1
def UpperCamelCase( self ):
_UpperCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ):
_UpperCAmelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
_UpperCAmelCase = self.num_labels
return config
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = TimesformerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = TimesformerForVideoClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
# verify the logits shape
_UpperCAmelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : List[Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__A : Union[str, Any] = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__A : int = False
__A : List[str] = False
__A : int = False
__A : str = False
def UpperCamelCase( self ):
_UpperCAmelCase = TimesformerModelTester(self )
_UpperCAmelCase = ConfigTester(
self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
_UpperCAmelCase = copy.deepcopy(_UpperCamelCase )
if return_labels:
if model_class in get_values(_UpperCamelCase ):
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TimesformerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase( self ):
if not self.has_attentions:
pass
else:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = self.model_tester.seq_length
_UpperCAmelCase = self.model_tester.num_frames
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
_UpperCAmelCase = len(_UpperCamelCase )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCamelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def UpperCamelCase( self ):
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
_UpperCAmelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def A__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
_UpperCAmelCase = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_video()
_UpperCAmelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_UpperCAmelCase = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
| 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class __A( UpperCAmelCase ):
SCREAMING_SNAKE_CASE = '''gpt_neox_japanese'''
def __init__( self : Union[str, Any] , __UpperCamelCase : str=3_2_0_0_0 , __UpperCamelCase : List[Any]=2_5_6_0 , __UpperCamelCase : Any=3_2 , __UpperCamelCase : List[str]=3_2 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : List[Any]=1.00 , __UpperCamelCase : Any=1_0_0_0_0 , __UpperCamelCase : Optional[Any]=2_0_4_8 , __UpperCamelCase : Tuple=0.02 , __UpperCamelCase : List[str]=1E-5 , __UpperCamelCase : str=True , __UpperCamelCase : str=3_1_9_9_6 , __UpperCamelCase : int=3_1_9_9_9 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Tuple=0.0 , **__UpperCamelCase : List[str] , ):
super().__init__(bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_multiple_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = rotary_pct
lowerCamelCase_ = rotary_emb_base
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = use_cache
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = hidden_dropout
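# Hedged usage sketch (the public name of the class above is
# GPTNeoXJapaneseConfig; the tiny override values are just examples):
if __name__ == "__main__":
    from transformers import GPTNeoXJapaneseConfig

    tiny = GPTNeoXJapaneseConfig(num_hidden_layers=2, hidden_size=128)
    assert tiny.num_hidden_layers == 2  # all other fields fall back to the defaults above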
| 272 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def __magic_name__( _A ):
'''simple docstring'''
def decorator(_A ):
UpperCamelCase__ = getattr(_A , """handle_key""" , [] )
handle += [key]
setattr(_A , """handle_key""" , _A )
return func
return decorator
def __magic_name__( *_A ):
'''simple docstring'''
def decorator(_A ):
UpperCamelCase__ = getattr(_A , """handle_key""" , [] )
handle += keys
setattr(_A , """handle_key""" , _A )
return func
return decorator
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __new__( cls : int , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = super().__new__(cls , lowercase , lowercase , lowercase )
if not hasattr(lowercase , """key_handler""" ):
setattr(lowercase , """key_handler""" , {} )
setattr(lowercase , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
UpperCamelCase__ = getattr(lowercase , """handle_key""" , [] )
for key in handled_keys:
UpperCamelCase__ = value
return new_cls
@staticmethod
def A ( cls : List[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ = get_character()
if char != KEYMAP["undefined"]:
UpperCamelCase__ = ord(lowercase )
UpperCamelCase__ = cls.key_handler.get(lowercase )
if handler:
UpperCamelCase__ = char
return handler(cls )
else:
return None
def __magic_name__( cls ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
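# Self-contained sketch of the same decorator pattern (our own names, not the
# snippet's): tag methods with the keys they handle, then collect the tags into
# a dispatch table -- which is what KeyHandler.__new__ does above with the
# `handle_key` attribute.
def demo_mark(key):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class DemoMenu:
    @demo_mark("up")
    def move_up(self):
        return "moved up"

demo_dispatch = {
    key: attr
    for attr in vars(DemoMenu).values()
    for key in getattr(attr, "handle_key", [])
}
assert demo_dispatch["up"](DemoMenu()) == "moved up"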
| 265 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : str = field(default="audio-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__a : ClassVar[Features] = Features({"audio": Audio()} )
__a : ClassVar[Features] = Features({"labels": ClassLabel} )
__a : str = "audio"
__a : str = "labels"
def A ( self : List[Any] , lowercase : List[Any] ) -> Any:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , lowercase ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def A ( self : int ) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
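# Hedged usage sketch (align_with_features is the pre-obfuscation name of the
# alignment method above; the feature names are examples):
#
#     from datasets import Audio, ClassLabel, Features
#     from datasets.tasks import AudioClassification
#     features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = AudioClassification().align_with_features(features)
#     # task.label_schema["labels"].names == ["cat", "dog"]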
| 265 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
@staticmethod
@abstractmethod
def __UpperCAmelCase ( __snake_case ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def __UpperCAmelCase ( self ):
"""simple docstring"""
raise NotImplementedError()
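# Hedged sketch of a concrete command (our own example, not from the source):
# subclasses hook into the CLI via the abstract static method
# (register_subcommand before obfuscation) and do their work in run().
#
#     class EnvCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             env_parser = parser.add_parser("env")
#             env_parser.set_defaults(func=lambda args: EnvCommand())
#
#         def run(self):
#             print("environment info...")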
| 188 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
a__ : Optional[int] = TypeVar('T')
class UpperCAmelCase_ ( Generic[T] ):
def __init__( self ,__snake_case = True ):
"""simple docstring"""
A_ = {} # dictionary of lists
A_ = directed
def __UpperCAmelCase ( self ,__snake_case ,__snake_case ):
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__snake_case )
self.adj_list[destination_vertex].append(__snake_case )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__snake_case )
A_ = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination
# vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(__snake_case )
A_ = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
A_ = [destination_vertex]
A_ = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__snake_case )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__snake_case )
A_ = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing the
# destination vertex as its first adjacent vertex
elif destination_vertex in self.adj_list:
A_ = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
A_ = [destination_vertex]
A_ = []
return self
def __repr__( self ):
"""simple docstring"""
return pformat(self.adj_list )
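# Hedged usage sketch (assuming the pre-obfuscation names GraphAdjacencyList
# and add_edge, as in the original implementation):
#
#     g = GraphAdjacencyList(directed=False)
#     g.add_edge(1, 2).add_edge(2, 3)   # add_edge returns self, so calls chain
#     print(g)  # {1: [2], 2: [1, 3], 3: [2]}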
| 188 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__A : Tuple = logging.get_logger(__name__)
def snake_case__ ( _lowerCamelCase ) ->List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = SwinConfig(
embed_dim=1_92, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
__lowercase : int = DetaConfig(
backbone_config=_lowerCamelCase, num_queries=9_00, encoder_ffn_dim=20_48, decoder_ffn_dim=20_48, num_feature_levels=5, assign_first_stage=_lowerCamelCase, with_box_refine=_lowerCamelCase, two_stage=_lowerCamelCase, )
# set labels
__lowercase : str = "huggingface/label-files"
if "o365" in model_name:
__lowercase : Optional[int] = 3_66
__lowercase : Optional[int] = "object365-id2label.json"
else:
__lowercase : Any = 91
__lowercase : Any = "coco-detection-id2label.json"
__lowercase : List[str] = num_labels
__lowercase : str = json.load(open(cached_download(hf_hub_url(_lowerCamelCase, _lowerCamelCase, repo_type="dataset" ) ), "r" ) )
__lowercase : Tuple = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__lowercase : List[str] = idalabel
__lowercase : str = {v: k for k, v in idalabel.items()}
return config
def snake_case__ ( _lowerCamelCase ) ->List[str]:
"""simple docstring"""
__lowercase : int = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->List[str]:
"""simple docstring"""
__lowercase : Union[str, Any] = dct.pop(_lowerCamelCase )
__lowercase : Optional[int] = val
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase : List[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase : str = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
__lowercase : Any = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase : Dict = in_proj_weight[:dim, :]
__lowercase : Optional[Any] = in_proj_bias[: dim]
__lowercase : List[str] = in_proj_weight[
dim : dim * 2, :
]
__lowercase : List[str] = in_proj_bias[
dim : dim * 2
]
__lowercase : List[str] = in_proj_weight[
-dim :, :
]
__lowercase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
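# Self-contained toy illustration of the fused-QKV split performed above (our
# own helper, not part of the original script; tiny dims for clarity): the
# checkpoint stores one (3*dim, dim) projection that is sliced into
# query / key / value thirds.
def _demo_qkv_split():
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q_w = fused[:dim, :]            # first third  -> query
    k_w = fused[dim : dim * 2, :]   # middle third -> key
    v_w = fused[-dim:, :]           # last third   -> value
    assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused)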
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->List[str]:
"""simple docstring"""
__lowercase : Tuple = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__lowercase : Optional[Any] = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
__lowercase : Any = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase : Dict = in_proj_weight[:hidden_size, :]
__lowercase : str = in_proj_bias[:hidden_size]
__lowercase : Union[str, Any] = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__lowercase : Any = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase : Optional[int] = in_proj_weight[-hidden_size:, :]
__lowercase : Optional[int] = in_proj_bias[-hidden_size:]
def snake_case__ ( ) ->Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase : str = Image.open(requests.get(_lowerCamelCase, stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->Dict:
"""simple docstring"""
__lowercase : List[str] = get_deta_config(_lowerCamelCase )
# load original state dict
if model_name == "deta-swin-large":
__lowercase : Optional[Any] = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
__lowercase : str = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F'Model name {model_name} not supported' )
__lowercase : List[Any] = torch.load(_lowerCamelCase, map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(_lowerCamelCase, param.shape )
# rename keys
__lowercase : Union[str, Any] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
read_in_swin_q_k_v(_lowerCamelCase, config.backbone_config )
read_in_decoder_q_k_v(_lowerCamelCase, _lowerCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__lowercase : Tuple = state_dict.pop(_lowerCamelCase )
__lowercase : int = val
if "input_proj" in key:
__lowercase : List[str] = state_dict.pop(_lowerCamelCase )
__lowercase : Optional[int] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__lowercase : Union[str, Any] = state_dict.pop(_lowerCamelCase )
__lowercase : str = val
# finally, create HuggingFace model and load state dict
__lowercase : int = DetaForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__lowercase : str = "cuda" if torch.cuda.is_available() else "cpu"
model.to(_lowerCamelCase )
# load image processor
__lowercase : str = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
__lowercase : List[str] = prepare_img()
__lowercase : Any = processor(images=_lowerCamelCase, return_tensors="pt" )
__lowercase : Optional[int] = encoding["pixel_values"]
__lowercase : str = model(pixel_values.to(_lowerCamelCase ) )
# verify logits
print("Logits:", outputs.logits[0, :3, :3] )
print("Boxes:", outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__lowercase : Dict = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
__lowercase : Optional[Any] = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
__lowercase : Union[str, Any] = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
__lowercase : Dict = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(_lowerCamelCase ), atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(_lowerCamelCase ), atol=1E-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F'jozhang97/{model_name}' )
processor.push_to_hub(F'jozhang97/{model_name}' )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__A : str = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
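# Hedged CLI sketch (the script filename is an assumption; the flags match the
# argparse definitions above):
#
#     python convert_deta_swin_to_pytorch.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large \
#         --push_to_hub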
| 701 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCAmelCase__ :
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 281 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        """simple docstring"""
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        """simple docstring"""
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
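# Stand-alone sketch of the pipeline exercised by the tests above (assumes the tiny
# test checkpoint can be downloaded; this random checkpoint generates empty text):
#
#     from transformers import pipeline
#
#     generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#     print(generator("Something there", do_sample=False))  # [{'generated_text': ''}]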
| 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled():
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError):
http_head('''https://huggingface.co''' )
| 0 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.fairseq_ids_to_tokens)
    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
return out_vocab_file, out_monolingual_vocab_file
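# Usage sketch for the tokenizer above (assumes network access to fetch the
# pretrained files; the input sentence is an arbitrary example):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     encoding = tokenizer("Chúng tôi là những nghiên cứu viên.")
#     print(encoding["input_ids"])  # ids framed as <s> ... </s> by build_inputs_with_special_tokens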
| 609 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """simple docstring"""

    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError("The parameter s must be of type str.")

    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError("The parameter s must be of type str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    '''simple docstring'''
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string must be of type str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string must be an int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
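# Worked example for the transform above (values computed by hand): the six
# rotations of "banana", sorted, end with the characters "nnbaaa", and "banana"
# itself sorts to position 3.
#
#     result = bwt_transform("banana")
#     assert result["bwt_string"] == "nnbaaa"
#     assert result["idx_original_string"] == 3
#     assert reverse_bwt("nnbaaa", 3) == "banana"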
| 609 | 1 |
'''simple docstring'''
from collections import deque
def tarjan(g):
    """simple docstring"""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components
def create_graph(n, edges):
    """simple docstring"""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
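# Small worked example for tarjan() (traced by hand): a single 3-cycle collapses
# into one strongly connected component, emitted in stack-pop order.
#
#     assert tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)])) == [[2, 1, 0]]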
| 404 |
'''simple docstring'''
def prefix_function(input_string: str) -> list:
    """simple docstring"""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """simple docstring"""
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
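# Worked example for prefix_function() (values computed by hand for "aabcdaabc"):
# each entry is the length of the longest proper prefix that is also a suffix of
# the string up to that position.
#
#     assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
#     assert longest_prefix("aabcdaabc") == 4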
| 404 | 1 |
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
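# Hypothetical helper showing how a pinned-dependency table like this is typically
# consumed in a setup script (the function below is an illustration, not part of
# the original module):
#
#     def deps_list(*pkgs):
#         """Return the pinned requirement strings for the given package names."""
#         return [deps[pkg] for pkg in pkgs]
#
#     install_requires = deps_list("numpy", "packaging", "requests", "tqdm")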
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_examples(self):
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)
    def test_tokenization_examples(self):
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory, identifier=identifier)
    def test_configuration_examples(self):
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory, identifier=identifier)
    def test_remaining_examples(self):
        directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory, n_identifier=n_identifiers)
    def test_doc_sources(self):
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 676 | 0 |