| code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects) -> list:
    """Set the given objects to `None` and empty the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size: int = 128):
    """Decorator that retries `function`, halving the batch size after every out-of-memory failure."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the decorator injects `batch_size` as the first argument
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
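
# Usage sketch for the decorator above. `_example_train`, `model`, and
# `dataset` are hypothetical names for illustration; the loop body is elided.
# The decorator injects `batch_size` as the first argument, so callers pass
# only the remaining arguments, and each recognized OOM halves the batch size.
@find_executable_batch_size(starting_batch_size=128)
def _example_train(batch_size, model, dataset):
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
    for batch in loader:
        pass  # forward/backward pass elided

# _example_train(model, dataset)  # retries with 64, 32, ... after each OOM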
| 245 |
import argparse
import hashlib  # hashlib is only used inside the test function
import struct
class SHA1Hash:
    """Contains the full pipeline for computing a SHA-1 hash of a bytestring."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        """Pad the data to a multiple of 64 bytes, appending the bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self) -> list:
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block: bytes) -> list:
        """Expand a 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        """Run the compression function over all blocks and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    data = b"Test String"
    assert SHA1Hash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case, the hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
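
    # Extra sanity check against the standard library: b"abc" is the classic
    # SHA-1 test vector, with digest a9993e364706816aba3e25717850c26c9cd0d89d.
    assert SHA1Hash(b"abc").final_hash() == hashlib.sha1(b"abc").hexdigest()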
| 245 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
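
# Illustration of what the lazy structure buys (assuming this file is
# transformers/models/ctrl/__init__.py): importing the package is cheap, and
# the heavy torch-backed submodule is only loaded on first attribute access:
#
#     from transformers.models.ctrl import CTRLConfig  # loads configuration_ctrl only
#     config = CTRLConfig()  # modeling_ctrl is still not imported here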
| 358 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 91 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a batch of random images as PIL images, numpy arrays or PyTorch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (4 channels in, 3 out after RGB conversion)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 68 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap over `Node`s that also tracks each node's index and its value by name."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                # keep the index bookkeeping in sync with the swapped elements
                self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]] = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 13 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration class for the Conditional DETR model."""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
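
# Brief usage sketch (hedged): with the defaults above, `attribute_map` lets
# `hidden_size` alias `d_model`:
#
#     config = ConditionalDetrConfig()
#     assert config.d_model == 256
#     assert config.hidden_size == config.d_model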
| 237 |

import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}


class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add the extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
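
# Illustrative usage of the sentinel helpers above (requires the pretrained
# "t5-small" files to be downloadable or already cached):
#
#     tok = T5TokenizerFast.from_pretrained("t5-small")
#     sentinels = tok.get_sentinel_tokens()  # 100 "<extra_id_N>" tokens
#     ids = tok.get_sentinel_token_ids()     # their vocabulary ids
#     assert len(sentinels) == len(ids) == 100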
| 237 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing search algorithm."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
| 336 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL
class ChineseCLIPImageProcessor(BaseImageProcessor):
    r"""Constructs a Chinese-CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or batch of images into model-ready `pixel_values`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
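
# Illustrative usage (hedged): preprocess a single PIL image with the defaults
# above (resize shortest edge to 224, center-crop to 224x224, normalize):
#
#     from PIL import Image
#     processor = ChineseCLIPImageProcessor()
#     batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#     batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224])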
| 122 | 0 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using naive exponentiation via Python's ** operator:
print((a / b) % p == (a * b ** (p - 2)) % p)
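
# Worked example: 3**10 = 59049 and 59049 % 7 == 4.
assert binary_exponentiation(3, 10, 7) == 4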
| 368 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 213 | 0 |
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        r"""Find all instances where a python file contains a `print` statement."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 85 |
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction of some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Map the teacher (BERT) parameters onto the student (DistilBERT) state-dict layout
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 346 | 0 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase="" , __UpperCAmelCase="train" ):
'''simple docstring'''
assert os.path.isdir(__UpperCAmelCase )
lowerCAmelCase__ :Dict = []
lowerCAmelCase__ :List[Any] = os.listdir(__UpperCAmelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCAmelCase__ :List[str] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
if not os.path.isfile(__UpperCAmelCase ):
continue
self.documents.append(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.documents[idx]
lowerCAmelCase__ :List[Any] = document_path.split('/' )[-1]
with open(__UpperCAmelCase , encoding='utf-8' ) as source:
lowerCAmelCase__ :List[Any] = source.read()
lowerCAmelCase__ , lowerCAmelCase__ :Any = process_story(__UpperCAmelCase )
return document_name, story_lines, summary_lines
def __A (_SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :List[str] = list(filter(lambda _SCREAMING_SNAKE_CASE : len(_SCREAMING_SNAKE_CASE ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
lowerCAmelCase__ :Optional[Any] = [_add_missing_period(_SCREAMING_SNAKE_CASE ) for line in nonempty_lines]
# gather article lines
lowerCAmelCase__ :List[Any] = []
lowerCAmelCase__ :Union[str, Any] = deque(_SCREAMING_SNAKE_CASE )
while True:
try:
lowerCAmelCase__ :int = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(_SCREAMING_SNAKE_CASE )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowerCAmelCase__ :List[Any] = list(filter(lambda _SCREAMING_SNAKE_CASE : not t.startswith('@highlight' ) , _SCREAMING_SNAKE_CASE ) )
return story_lines, summary_lines
def __A (_SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
if len(_SCREAMING_SNAKE_CASE ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_SCREAMING_SNAKE_CASE )) )
return sequence
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ :Any = torch.ones_like(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = sequence == pad_token_id
lowerCAmelCase__ :int = 0
return mask
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
"""simple docstring"""
lowerCAmelCase__ :Any = [tokenizer.encode(_SCREAMING_SNAKE_CASE ) for line in story_lines]
lowerCAmelCase__ :Union[str, Any] = [token for sentence in story_lines_token_ids for token in sentence]
lowerCAmelCase__ :str = [tokenizer.encode(_SCREAMING_SNAKE_CASE ) for line in summary_lines]
lowerCAmelCase__ :Optional[Any] = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ :Dict = []
for sequence in batch:
lowerCAmelCase__ :str = -1
lowerCAmelCase__ :Dict = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_SCREAMING_SNAKE_CASE )
return torch.tensor(_SCREAMING_SNAKE_CASE )
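
def _example_fit_and_segment():
    """Worked examples for the helpers above (illustrative values only)."""
    # Pad or truncate to a fixed block size:
    assert fit_to_block_size([1, 2, 3], 5, 0) == [1, 2, 3, 0, 0]
    assert fit_to_block_size([1, 2, 3, 4, 5, 6], 5, 0) == [1, 2, 3, 4, 5]
    # Alternating segment ids that switch at each separator token (id 9 here);
    # returns tensor([[0, 0, 0, 1, 1]]).
    return compute_token_type_ids([[9, 1, 2, 9, 3]], separator_token_id=9)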
| 254 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) ->str:
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
lowerCAmelCase__ :Optional[int] = quote(_SCREAMING_SNAKE_CASE )
return hfh.hf_hub_url(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' , revision=_SCREAMING_SNAKE_CASE )
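
# Illustrative call (repo and file names are made up for the example); with
# revision=None the Hub resolves the default branch:
#
#     url = hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet")
#     # -> "https://huggingface.co/datasets/squad/resolve/main/plain_text/train-00000-of-00001.parquet"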
| 254 | 1 |
"""simple docstring"""
def _snake_case ( snake_case__ : int = 6008_5147_5143 ):
try:
A = int(__lowercase )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
A = 2
A = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
A = i
while n % i == 0:
A = n // i
i += 1
return int(__lowercase )
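
# Worked example: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
assert solution(13195) == 29
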
if __name__ == "__main__":
print(F"""{solution() = }""") | 74 |
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Wrapper that mixes the outputs of two `Transformer2DModel`s during inference."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)

| 282 | 0 |
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a grayscale image computed from an RGB image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image from a grayscale image (threshold at 127)."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the dilated image: a pixel is set if any kernel-covered neighbour is set."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")

| 130 |
import random
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@staticmethod
def A ( lowercase : str ):
'''simple docstring'''
_snake_case = [ord(lowercase ) for i in text]
_snake_case = []
_snake_case = []
for i in plain:
_snake_case = random.randint(1 , 300 )
_snake_case = (i + k) * k
cipher.append(lowercase )
key.append(lowercase )
return cipher, key
@staticmethod
def A ( lowercase : list[int] , lowercase : list[int] ):
'''simple docstring'''
_snake_case = []
for i in range(len(lowercase ) ):
_snake_case = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(lowercase ) )
return "".join(lowercase )
if __name__ == "__main__":
_lowerCamelCase , _lowerCamelCase : Optional[int] = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k)) | 130 | 1 |
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) != 0 )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
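# The tuple-count trick above is just NOT(a AND b) for binary inputs; a minimal
# standalone equivalence check:
for a in (0, 1):
    for b in (0, 1):
        assert int((a, b).count(0) != 0) == int(not (a and b))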
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 204 |
def _SCREAMING_SNAKE_CASE ( lowercase : float , lowercase : list[float] ):
'''simple docstring'''
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
lowerCamelCase_ = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowercase ) )
return round(lowercase , ndigits=2 )
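# Worked example for the discounted sum above (hypothetical cash flows):
#   discount_rate = 0.10, cash_flows = [-100, 60, 60]
#   NPV = -100 / 1.1**0 + 60 / 1.1**1 + 60 / 1.1**2
#       = -100 + 54.55 + 49.59 ≈ 4.13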
if __name__ == "__main__":
import doctest
doctest.testmod()
| 204 | 1 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def lowerCAmelCase_ ( _lowercase : str) -> List[str]:
"""simple docstring"""
a__ : List[Any] = torch.exp(_lowercase)
a__ : Union[str, Any] = torch.sum(exp_x , dim=1) # sum of exp(x_i)
a__ : Optional[Any] = torch.sum(x * exp_x , dim=1) # sum of x_i * exp(x_i)
return torch.log(A) - B / A
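# Worked derivation for the helper above: with p_i = exp(x_i) / A,
#   H = -sum_i p_i * log(p_i)
#     = -sum_i (exp(x_i) / A) * (x_i - log(A))
#     = log(A) - (sum_i x_i * exp(x_i)) / A
#     = log(A) - B / A
# e.g. uniform logits x = [0, 0] give A = 2, B = 0, so H = log(2) ≈ 0.6931.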
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase ) -> List[str]:
"""simple docstring"""
super().__init__()
a__ : Tuple = config.output_attentions
a__ : Tuple = config.output_hidden_states
a__ : Tuple = nn.ModuleList([BertLayer(__lowercase ) for _ in range(config.num_hidden_layers )] )
a__ : int = nn.ModuleList([BertHighway(__lowercase ) for _ in range(config.num_hidden_layers )] )
a__ : Optional[Any] = [-1 for _ in range(config.num_hidden_layers )]
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> int:
"""simple docstring"""
if (type(__lowercase ) is float) or (type(__lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
self.early_exit_entropy[i] = x
else:
a__ : Dict = x
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
a__ : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Dict:
"""simple docstring"""
a__ : Any = ()
a__ : Any = ()
a__ : str = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
a__ : Tuple = all_hidden_states + (hidden_states,)
a__ : Optional[int] = layer_module(
__lowercase , __lowercase , head_mask[i] , __lowercase , __lowercase )
a__ : str = layer_outputs[0]
if self.output_attentions:
a__ : Optional[Any] = all_attentions + (layer_outputs[1],)
a__ : Dict = (hidden_states,)
if self.output_hidden_states:
a__ : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
a__ : Union[str, Any] = current_outputs + (all_attentions,)
a__ : List[Any] = self.highway[i](__lowercase )
# logits, pooled_output
if not self.training:
a__ : Optional[int] = highway_exit[0]
a__ : List[Any] = entropy(__lowercase )
a__ : Tuple = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
a__ : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
a__ : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__lowercase , i + 1 )
else:
a__ : List[str] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
a__ : Dict = all_hidden_states + (hidden_states,)
a__ : List[str] = (hidden_states,)
if self.output_hidden_states:
a__ : str = outputs + (all_hidden_states,)
if self.output_attentions:
a__ : Optional[Any] = outputs + (all_attentions,)
a__ : Optional[Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , A__ , )
class snake_case__ (A__ ):
"""simple docstring"""
def __init__( self , __lowercase ) -> int:
"""simple docstring"""
super().__init__(__lowercase )
a__ : Dict = config
a__ : int = BertEmbeddings(__lowercase )
a__ : List[Any] = DeeBertEncoder(__lowercase )
a__ : List[Any] = BertPooler(__lowercase )
self.init_weights()
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
self.encoder.init_highway_pooler(self.pooler )
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
return self.embeddings.word_embeddings
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[str]:
"""simple docstring"""
a__ : List[str] = value
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> int:
"""simple docstring"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__lowercase )
@add_start_docstrings_to_model_forward(__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
a__ : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
a__ : Dict = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
a__ : List[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
a__ : Any = torch.ones(__lowercase , device=__lowercase )
if encoder_attention_mask is None:
a__ : Optional[int] = torch.ones(__lowercase , device=__lowercase )
if token_type_ids is None:
a__ : Dict = torch.zeros(__lowercase , dtype=torch.long , device=__lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
a__ : torch.Tensor = self.get_extended_attention_mask(__lowercase , __lowercase , __lowercase )
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
a__ : List[Any] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
a__ : Union[str, Any] = encoder_attention_mask[:, None, None, :]
a__ : Optional[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
a__ : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
a__ : Union[str, Any] = self.get_head_mask(__lowercase , self.config.num_hidden_layers )
a__ : Optional[int] = self.embeddings(
input_ids=__lowercase , position_ids=__lowercase , token_type_ids=__lowercase , inputs_embeds=__lowercase )
a__ : Dict = self.encoder(
__lowercase , attention_mask=__lowercase , head_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
a__ : int = encoder_outputs[0]
a__ : str = self.pooler(__lowercase )
a__ : int = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class snake_case__ (A__ ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
a__ : str = message
a__ : Union[str, Any] = exit_layer # start from 1!
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self , __lowercase ) -> Tuple:
"""simple docstring"""
super().__init__()
a__ : Union[str, Any] = BertPooler(__lowercase )
a__ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
a__ : str = nn.Linear(config.hidden_size , config.num_labels )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Tuple:
"""simple docstring"""
a__ : List[str] = encoder_outputs[0]
a__ : int = self.pooler(__lowercase )
# "return" pooler_output
# BertModel
a__ : int = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
a__ : Dict = bmodel_output[1]
a__ : str = self.dropout(__lowercase )
a__ : Optional[int] = self.classifier(__lowercase )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , A__ , )
class snake_case__ (A__ ):
"""simple docstring"""
def __init__( self , __lowercase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(__lowercase )
a__ : List[str] = config.num_labels
a__ : Tuple = config.num_hidden_layers
a__ : Optional[int] = DeeBertModel(__lowercase )
a__ : List[Any] = nn.Dropout(config.hidden_dropout_prob )
a__ : Any = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=-1 , __lowercase=False , ) -> str:
"""simple docstring"""
a__ : List[Any] = self.num_layers
try:
a__ : Union[str, Any] = self.bert(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
a__ : int = outputs[1]
a__ : Any = self.dropout(__lowercase )
a__ : List[str] = self.classifier(__lowercase )
a__ : List[str] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
a__ : str = e.message
a__ : Optional[Any] = e.exit_layer
a__ : Optional[int] = outputs[0]
if not self.training:
a__ : Any = entropy(__lowercase )
a__ : Any = []
a__ : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
a__ : Any = MSELoss()
a__ : Any = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
a__ : int = CrossEntropyLoss()
a__ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
a__ : List[str] = []
for highway_exit in outputs[-1]:
a__ : List[str] = highway_exit[0]
if not self.training:
highway_logits_all.append(__lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
a__ : List[Any] = MSELoss()
a__ : Tuple = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
a__ : Dict = CrossEntropyLoss()
a__ : Any = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__lowercase )
if train_highway:
a__ : int = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
a__ : Any = (loss,) + outputs
if not self.training:
a__ : Any = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
a__ : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 353 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : str =logging.getLogger(__name__)
@dataclass
class snake_case__ (A__ ):
"""simple docstring"""
__lowerCAmelCase :Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
__lowerCAmelCase :bool = field(default=A__ , metadata={"help": "Whether to SortishSamler or not."} )
__lowerCAmelCase :bool = field(
default=A__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
__lowerCAmelCase :bool = field(default=A__ , metadata={"help": "whether to use adafactor"} )
__lowerCAmelCase :Optional[float] = field(
default=A__ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
__lowerCAmelCase :Optional[float] = field(
default=A__ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
__lowerCAmelCase :Optional[float] = field(default=A__ , metadata={"help": "Dropout probability. Goes into model.config."} )
__lowerCAmelCase :Optional[float] = field(
default=A__ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
__lowerCAmelCase :Optional[str] = field(
default="linear" , metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 266 | 0 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int:
__UpperCamelCase : List[str] = parent
__UpperCamelCase : str = batch_size
__UpperCamelCase : str = decoder_seq_length
# For common tests
__UpperCamelCase : Optional[int] = self.decoder_seq_length
__UpperCamelCase : Any = is_training
__UpperCamelCase : Tuple = use_attention_mask
__UpperCamelCase : Optional[int] = use_labels
__UpperCamelCase : Dict = vocab_size
__UpperCamelCase : Optional[int] = d_model
__UpperCamelCase : Union[str, Any] = d_model
__UpperCamelCase : int = decoder_layers
__UpperCamelCase : Dict = decoder_layers
__UpperCamelCase : str = decoder_ffn_dim
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : List[Any] = eos_token_id
__UpperCamelCase : int = bos_token_id
__UpperCamelCase : Tuple = pad_token_id
__UpperCamelCase : Tuple = decoder_start_token_id
__UpperCamelCase : Dict = use_cache
__UpperCamelCase : Optional[Any] = max_position_embeddings
__UpperCamelCase : int = None
__UpperCamelCase : Optional[int] = decoder_seq_length
__UpperCamelCase : Optional[int] = 2
__UpperCamelCase : Optional[int] = 1
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : int = None
if self.use_attention_mask:
__UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__UpperCamelCase : List[str] = None
if self.use_labels:
__UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : Optional[Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
__UpperCamelCase : Optional[Any] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
__UpperCamelCase : List[Any] = model(_UpperCAmelCase )
__UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 )
__UpperCamelCase : List[Any] = outputs["past_key_values"]
# create hypothetical next token and extend to next_input_ids
__UpperCamelCase : Optional[int] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the next tokens to input_ids
__UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"]
__UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"]
# select random slice
__UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
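# A minimal sketch of the cache equivalence exercised above (any transformers
# causal decoder; `model` and the ids are placeholders):
#   prefix = model(input_ids[:, :-1], use_cache=True)
#   step = model(input_ids[:, -1:], past_key_values=prefix.past_key_values)
#   # step's last-position hidden states should match a full, uncached pass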
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : List[str] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs
__UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A = (TrOCRForCausalLM,) if is_torch_available() else ()
A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A = True
A = False
def a_ (self ) -> List[str]:
__UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase )
__UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase )
def a_ (self ) -> Dict:
pass
def a_ (self ) -> Optional[int]:
pass
def a_ (self ) -> Optional[Any]:
pass
def a_ (self ) -> Dict:
self.config_tester.run_common_tests()
def a_ (self ) -> List[Any]:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase )
def a_ (self ) -> Any:
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def a_ (self ) -> Tuple:
pass
| 298 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
@dataclass
class A :
'''simple docstring'''
A = 42
A = field(default_factory=SCREAMING_SNAKE_CASE__ )
A = field(default_factory=SCREAMING_SNAKE_CASE__ )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
__UpperCamelCase : str = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_UpperCAmelCase )
def __call__(self , _UpperCAmelCase ) -> Optional[int]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_UpperCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def a_ (self ) -> Tuple:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A :
'''simple docstring'''
A = 42
A = 42
A = 0
A = field(default_factory=SCREAMING_SNAKE_CASE__ )
A = field(default_factory=SCREAMING_SNAKE_CASE__ )
def __call__(self , _UpperCAmelCase ) -> Any:
__UpperCamelCase : List[str] = Tracker(self.dest )(_UpperCAmelCase ).parametrized
__UpperCamelCase : List[Any] = Tracker(self.src )(_UpperCAmelCase ).parametrized
__UpperCamelCase : Optional[int] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) )
__UpperCamelCase : List[Any] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise Exception(
f"Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while"
f" destination module has {len(_UpperCAmelCase )}." )
for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"Transfered from={src_m} to={dest_m}" )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = True ):
print(F"Converting {name}..." )
with torch.no_grad():
__UpperCamelCase : int = timm.create_model(snake_case__ , pretrained=snake_case__ ).eval()
__UpperCamelCase : Union[str, Any] = ResNetForImageClassification(snake_case__ ).eval()
__UpperCamelCase : Tuple = ModuleTransfer(src=snake_case__ , dest=snake_case__ )
__UpperCamelCase : List[Any] = torch.randn((1, 3, 224, 224) )
module_transfer(snake_case__ )
assert torch.allclose(from_model(snake_case__ ) , our_model(snake_case__ ).logits ), "The model logits don't match the original ones."
__UpperCamelCase : Any = F"resnet{'-'.join(name.split('resnet' ) )}"
print(snake_case__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=snake_case__ , )
# we can use the convnext one
__UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=snake_case__ , )
print(F"Pushed {checkpoint_name}" )
def __lowerCAmelCase ( snake_case__ , snake_case__ = None , snake_case__ = True ):
__UpperCamelCase : str = "imagenet-1k-id2label.json"
__UpperCamelCase : Any = 1_000
__UpperCamelCase : List[str] = (1, num_labels)
__UpperCamelCase : List[str] = "huggingface/label-files"
__UpperCamelCase : str = num_labels
__UpperCamelCase : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) )
__UpperCamelCase : List[str] = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCamelCase : Any = idalabel
__UpperCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCamelCase : Tuple = partial(snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ )
__UpperCamelCase : Dict = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(snake_case__ , names_to_config[model_name] , snake_case__ , snake_case__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 298 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCamelCase : str = """true"""
def A_ ( _lowerCAmelCase , _lowerCAmelCase=82 , _lowerCAmelCase=16 ) -> Optional[Any]:
set_seed(42 )
UpperCamelCase : Union[str, Any] = RegressionModel()
UpperCamelCase : List[Any] = deepcopy(_lowerCAmelCase )
UpperCamelCase : str = RegressionDataset(length=_lowerCAmelCase )
UpperCamelCase : List[str] = DataLoader(_lowerCAmelCase , batch_size=_lowerCAmelCase )
model.to(accelerator.device )
UpperCamelCase : Dict = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase )
return model, ddp_model, dataloader
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False ) -> int:
UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
UpperCamelCase : Any = load_dataset("glue" , "mrpc" , split="validation" )
def tokenize_function(_lowerCAmelCase ):
UpperCamelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
with accelerator.main_process_first():
UpperCamelCase : Tuple = dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
UpperCamelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCAmelCase ):
if use_longest:
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=16 )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
UpperCamelCase : Any = Accelerator(dispatch_batches=_lowerCAmelCase , split_batches=_lowerCAmelCase )
UpperCamelCase : Any = get_dataloader(_lowerCAmelCase , not dispatch_batches )
UpperCamelCase : List[Any] = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" , return_dict=_lowerCAmelCase )
UpperCamelCase : Optional[int] = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
UpperCamelCase : Dict = []
for batch in dataloader:
UpperCamelCase : int = batch.values()
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(_lowerCAmelCase )
UpperCamelCase : List[str] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCamelCase : Union[str, Any] = [], []
for logit, targ in logits_and_targets:
logits.append(_lowerCAmelCase )
targs.append(_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = torch.cat(_lowerCAmelCase ), torch.cat(_lowerCAmelCase )
return logits, targs
def A_ ( _lowerCAmelCase , _lowerCAmelCase=82 , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=16 ) -> Optional[int]:
UpperCamelCase : Tuple = get_basic_setup(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase : List[str] = generate_predictions(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
assert (
len(_lowerCAmelCase ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_lowerCAmelCase )}"""
def A_ ( _lowerCAmelCase = False , _lowerCAmelCase = False ) -> Union[str, Any]:
UpperCamelCase : Optional[Any] = evaluate.load("glue" , "mrpc" )
UpperCamelCase : Optional[int] = get_mrpc_setup(_lowerCAmelCase , _lowerCAmelCase )
# First do baseline
UpperCamelCase : List[str] = setup["no"]
model.to(_lowerCAmelCase )
model.eval()
for batch in dataloader:
batch.to(_lowerCAmelCase )
with torch.inference_mode():
UpperCamelCase : Any = model(**_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_lowerCAmelCase , references=batch["labels"] )
UpperCamelCase : Dict = metric.compute()
# Then do distributed
UpperCamelCase : int = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCamelCase : List[str] = model(**_lowerCAmelCase )
UpperCamelCase : str = outputs.logits.argmax(dim=-1 )
UpperCamelCase : List[Any] = batch["labels"]
UpperCamelCase : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_lowerCAmelCase , references=_lowerCAmelCase )
UpperCamelCase : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def A_ ( ) -> Optional[Any]:
UpperCamelCase : List[Any] = Accelerator(split_batches=_lowerCAmelCase , dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_lowerCAmelCase , _lowerCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCamelCase : List[Any] = Accelerator(split_batches=_lowerCAmelCase , dispatch_batches=_lowerCAmelCase )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_lowerCAmelCase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
UpperCamelCase : List[Any] = Accelerator()
test_torch_metrics(_lowerCAmelCase , 512 )
accelerator.state._reset_state()
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 358 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A__ :
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=2 , ):
'''simple docstring'''
UpperCamelCase : List[str] = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Optional[int] = patch_size
UpperCamelCase : List[str] = num_channels
UpperCamelCase : Any = is_training
UpperCamelCase : Dict = use_labels
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : str = intermediate_size
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : List[Any] = type_sequence_label_size
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : Union[str, Any] = scope
UpperCamelCase : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCamelCase : Optional[Any] = (image_size // patch_size) ** 2
UpperCamelCase : int = num_patches + 2
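# Worked arithmetic for the comment above with this tester's defaults:
#   (image_size // patch_size) ** 2 + 2 = (30 // 2) ** 2 + 2 = 225 + 2 = 227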
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Tuple = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = TFDeiTModel(config=A_ )
UpperCamelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = TFDeiTForMaskedImageModeling(config=A_ )
UpperCamelCase : Optional[Any] = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase : Dict = 1
UpperCamelCase : Optional[Any] = TFDeiTForMaskedImageModeling(A_ )
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Any = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.type_sequence_label_size
UpperCamelCase : List[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : List[Any] = 1
UpperCamelCase : Optional[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : List[Any] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = config_and_inputs
UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :str = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_UpperCAmelCase :Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_UpperCAmelCase :Dict = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Optional[int] = False
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = TFDeiTModelTester(self )
UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Dense ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : str = model_class(A_ )
UpperCamelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def __UpperCamelCase( self , A_ , A_ , A_=False ):
'''simple docstring'''
UpperCamelCase : List[str] = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = TFDeiTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A_ ( ) -> str:
UpperCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
UpperCamelCase : List[Any] = self.default_image_processor
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Union[str, Any] = image_processor(images=A_ , return_tensors="tf" )
# forward pass
UpperCamelCase : str = model(**A_ )
# verify the logits
UpperCamelCase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase : Tuple = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 140 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 181 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
def __init__( self : Any , _A : int , _A : str ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : Union[str, Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
'''simple docstring'''
if audio_length_in_s is None:
UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ : Dict = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ : Union[str, Any] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCAmelCase__ : Optional[Any] = int(_A )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ : List[str] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
''' process.''' )
UpperCAmelCase__ : Union[str, Any] = int(_A )
UpperCAmelCase__ : Any = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ : int = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
# set step values
self.scheduler.set_timesteps(_A , device=audio.device )
UpperCAmelCase__ : Union[str, Any] = self.scheduler.timesteps.to(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : Any = self.unet(_A , _A ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCAmelCase__ : Union[str, Any] = self.scheduler.step(_A , _A , _A ).prev_sample
UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase__ : List[str] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_A )
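# Worked example for the length handling above (hypothetical config): with
# sample_rate = 16_000 and audio_length_in_s = 4.0, sample_size = 64_000; three
# up blocks give down_scale_factor = 2 ** 3 = 8, and 64_000 % 8 == 0, so no
# rounding is needed. A non-divisible request is padded up to the next multiple
# of 8 and trimmed back to `original_sample_size` after denoising.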
| 181 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 64 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 64 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__a: List[str] = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Optional[int] = ["""OwlViTFeatureExtractor"""]
__a: Tuple = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Tuple = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__a: List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 198 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
SCREAMING_SNAKE_CASE : Union[str, Any] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
SCREAMING_SNAKE_CASE : Any = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode("""utf-8""").split()
SCREAMING_SNAKE_CASE : Union[str, Any] = """|""".join(sys.argv[1:])
SCREAMING_SNAKE_CASE : int = re.compile(rF'^({joined_dirs}).*?\.py$')
SCREAMING_SNAKE_CASE : str = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 102 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __lowerCAmelCase ( unittest.TestCase, _a ):
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = load_tool('''text-to-speech''' )
self.tool.setup()
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[int] = self.tool('''hey''' )
snake_case_ : Union[str, Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Any = self.tool('''hey''' )
snake_case_ : str = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 279 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowerCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
if os.path.exists(_UpperCamelCase ):
if os.path.exists(os.path.join(_UpperCamelCase , '''config.json''' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , '''config.json''' ) ):
os.remove(os.path.join(_UpperCamelCase , '''config.json''' ) )
if os.path.exists(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) )
else:
os.makedirs(_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=False ) -> Optional[int]:
"""simple docstring"""
snake_case_ : List[Any] = 2
if unlogit:
snake_case_ : Any = torch.pow(_UpperCamelCase , _UpperCamelCase )
snake_case_ : Optional[Any] = p * torch.log(_UpperCamelCase )
plogp[p == 0] = 0
return -plogp.sum(dim=-1 )
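# Worked example for the helper above: a uniform distribution p = [0.5, 0.5]
# gives -sum(p * log(p)) = -2 * (0.5 * log(0.5)) = log(2) ≈ 0.6931 nats.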
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(_UpperCamelCase ) ) ) )
for row in range(len(_UpperCamelCase ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run a forward/backward pass over the dataset to collect per-head attention entropy and importance scores."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
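# mask_heads: iteratively zero out the least-important heads until the score
# (1 / LM loss) drops below `masking_threshold * original_score`, re-estimating
# head importance after every masking step.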
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
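# prune_heads: pruning is like masking, but the masked weights are actually
# removed from the model, so parameter count and inference speed improve.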
def prune_heads(args, model, eval_dataloader, head_mask):
    # Measure the masked model first to get a reference score and timing.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
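# Typical invocation (script and data paths below are placeholders):
#   python run_prune_gpt2.py --model_name_or_path gpt2 --data_dir tokens.txt \
#       --output_dir ./pruned_gpt2 --try_masking --masking_threshold 0.9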
if __name__ == "__main__":
main()
| 279 | 1 |
"""Testing suite for the PyTorch MobileViTV2 model."""
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
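# `ConfigTester` runs the generic config round-trip checks; the subclass below
# only adds a MobileViTV2-specific check for the `width_multiplier` attribute.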
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Smallest hidden size the backbone produces for this width multiplier.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
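# The @slow integration tests below download real Hub checkpoints and compare
# model outputs against reference values recorded in this file.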
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 198 |
"""Convert a Longformer question-answering checkpoint trained with PyTorch Lightning into a Transformers checkpoint."""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
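# The Lightning module below only mirrors the training-time layout (a Longformer
# backbone plus a 2-output QA head) so the checkpoint's state_dict keys line up.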
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 198 | 1 |
# Pinned dependency table: maps a pip package name to its requirement specifier.
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
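# A downstream consumer (illustrative sketch, not part of this table) might
# resolve pins like so:
#     required = [deps[pkg] for pkg in ("torch", "transformers")]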
| 352 |
"""Convert BART checkpoint."""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak a fairseq BART checkpoint into the Transformers BART layout."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
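# Example invocation (script name is a placeholder; the first argument is any
# entry of FAIRSEQ_MODELS or a local model.pt path):
#   python convert_bart_checkpoint.py bart.large ./bart-large-dump --hf_config facebook/bart-large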
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 217 | 0 |
"""Fast tokenization classes for OpenAI GPT-2."""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
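# Usage sketch (illustrative):
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   ids = tokenizer("Hello world")["input_ids"]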
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library), using byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

| 145 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
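# `is_safetensors_compatible` should be True only when every `.bin` weight file
# in a repo listing has a matching `.safetensors` counterpart (optionally for a
# variant such as "fp16"); the cases below cover both directions.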
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 91 | 0 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
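# Two fixture pages: a nested document that exercises xpath generation
# (including sibling indices like b[1]/b[2]) and a minimal page for batching.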
def get_html_strings():
    html_string_1 = '<HTML>\n\n    <HEAD>\n    <TITLE>sample document</TITLE>\n    </HEAD>\n\n    <BODY BGCOLOR="FFFFFF">\n    <HR>\n    <a href="http://google.com">Goog</a>\n    <H1>This is one header</H1>\n    <H2>This is a another Header</H2>\n    <P>Travel from\n    <P>\n    <B>SFO to JFK</B>\n    <BR>\n    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n    <HR>\n    <div style="color:#0000FF">\n    <h3>Traveler <b> name </b> is\n    <p> John Doe </p>\n    </div>'

    html_string_2 = '\n    <!DOCTYPE html>\n    <html>\n    <body>\n\n    <h1>My First Heading</h1>\n    <p>My first paragraph.</p>\n\n    </body>\n    </html>\n    '

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 308 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
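# An md5 of the raw image bytes is a cheap fingerprint for comparing the
# rendered depth map against a stored reference.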
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 308 | 1 |
"""A platform independent file lock that supports the with-statement."""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__lowerCAmelCase : Any ="3.0.12"
__lowerCAmelCase : Optional[Any] =None
def logger():
    """Returns the logger instance used by this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
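# Usage sketch for the lock classes defined below:
#   with FileLock("resource.txt.lock", timeout=10):
#       ...  # exclusive access to resource.txt
# `acquire()` returns the proxy above, so `with lock.acquire(timeout=10):` also works.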
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(UpperCamelCase__):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(UpperCamelCase__):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
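A minimal usage sketch for the lock API above (an editor's illustration, not part of the vendored module; it assumes `Timeout` is the exception class imported near the top of this file):

lock = FileLock("resource.txt.lock", timeout=1)
try:
    with lock:  # __enter__ calls acquire(), __exit__ calls release()
        with open("resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("another process holds the lock; gave up after 1 second")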
| 237 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
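The final aggregation above soft-votes across folds: per-fold logits are stacked, averaged, and argmax'd once. The same idea on dummy tensors (the shapes are illustrative assumptions, not taken from the script):

import torch

num_folds, num_examples, num_labels = 3, 5, 2
fold_logits = [torch.randn(num_examples, num_labels) for _ in range(num_folds)]

# Stack to (num_folds, num_examples, num_labels), average over folds, argmax once.
ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds)
final_predictions = ensembled.argmax(dim=-1)
assert final_predictions.shape == (num_examples,)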
| 237 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowerCamelCase_ : Optional[int] = """src/transformers"""
lowerCamelCase_ : int = """docs/source/en/tasks"""
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    # Build the markdown list of models that support the task covered by `task_guide`.
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    # Check (and optionally fix) the auto-generated model list inside a task guide.
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 363 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    # Detects objects in an image given free-text candidate labels.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results
    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 215 | 0 |
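A usage sketch for the pipeline defined above, through the high-level `pipeline` factory; the OWL-ViT checkpoint and image URL are common examples, not mandated by the code:

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for pred in predictions:
    # Each entry carries the keys produced by postprocess(): score, label, box.
    print(pred["label"], round(pred["score"], 3), pred["box"])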
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
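The module above wires its public names into `_import_structure` and swaps itself for a `_LazyModule`, so heavy backends import only on first attribute access. A minimal standalone sketch of the same idea using PEP 562 module `__getattr__` (an illustration, not the transformers implementation):

# lazy_mod.py: resolve attributes on demand instead of importing eagerly.
import importlib

_import_structure = {"json": ["dumps", "loads"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")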
| 202 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        # Checks whether there might be something wrong with the given input for this model.
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
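A short usage sketch for the pipelines defined above; the `pipeline` factory picks default checkpoints, which may vary by transformers version:

from transformers import pipeline

summarizer = pipeline("summarization")  # downloads a default seq2seq checkpoint
print(summarizer("Long article text ...", max_length=50, min_length=10))

translator = pipeline("translation_en_to_fr")  # the task string encodes src/tgt languages
print(translator("How are you?"))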
| 213 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to reach the same score
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
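Each test above drives an example script by patching `sys.argv`. A self-contained sketch of that pattern (`fake_main` is a stand-in for a script's `main()`, invented for the example):

import sys
from unittest.mock import patch


def fake_main():
    # Stand-in for an example script's main(); the real tests import the script module.
    print("argv seen by the script:", sys.argv)


testargs = ["run_something.py", "--output_dir", "/tmp/out"]
with patch.object(sys, "argv", testargs):
    fake_main()  # the patched argv is what argparse would consume here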
| 354 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)
    @slow
    def test_pretrained_model_save_load(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_1(**inputs)
        out_1 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
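The recurring pattern in these tests is composing independent vision and text encoders into one dual-encoder model. A usage sketch reusing the tiny test checkpoints referenced above:

from transformers import TFVisionTextDualEncoderModel

model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
    "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
)
# The composite model projects both towers into a shared space; calling it with
# `input_ids`, `attention_mask` and `pixel_values` yields `text_embeds` and
# `image_embeds` of shape (batch_size, projection_dim), as the shape asserts above check.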
| 225 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self ) -> str:
'''simple docstring'''
return str(self )
    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1) is built as the 3x3 identity for this demo
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
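A quick cross-check of the Sherman-Morrison update against a dense inverse, using numpy (numpy is an assumption here; the class above does not depend on it). The identity being verified is (A + uv^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u):

import numpy as np

a = np.array([[4.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]])
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

ainv = np.linalg.inv(a)
denom = 1.0 + float(v.T @ ainv @ u)  # nonzero for this choice of a, u, v
sm = ainv - (ainv @ u @ v.T @ ainv) / denom
assert np.allclose(sm, np.linalg.inv(a + u @ v.T))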
| 254 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Returns the number of ways a row of the given length can be filled."""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }')
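If this is the standard solution to Project Euler problem 114 (red blocks of length at least three, separated by at least one black square), which is an assumption about provenance rather than something stated in the snippet, the documented small case can be checked directly:

assert solution(7) == 17  # 17 ways for a row of length 7, per the problem statement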
| 254 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values
    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
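    # Headless check of the degree-2 example: the Bernstein weights at t = 0.5
    # are [0.25, 0.5, 0.25], so B(0.5) = 0.25*(0,0) + 0.5*(5,5) + 0.25*(5,0) = (3.75, 2.5).
    from math import isclose

    curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
    x, y = curve.bezier_curve_function(0.5)
    assert isclose(x, 3.75) and isclose(y, 2.5)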
| 102 |
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
UpperCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
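    # Minimal check of the min-max strategy above (values are illustrative):
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]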
| 102 | 1 |
def selection_sort(collection: list) -> list:
    """simple docstring"""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 130 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 130 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ) -> None:
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 163 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
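
# A minimal smoke test of the activation parsing above (illustrative sketch,
# assumes the surrounding `transformers` package is importable):
#
#     config = T5Config(feed_forward_proj="gated-gelu")
#     assert config.is_gated_act and config.dense_act_fn == "gelu_new"  # backwards-compat remap
#     assert config.num_decoder_layers == config.num_layers  # symmetric by default
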
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
| 163 | 1 |
"""simple docstring"""
import random
def _partition(data: list, pivot) -> tuple[list, list, list]:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
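

# Illustrative median lookup (k-th smallest value at k = len(data) // 2):
if __name__ == "__main__":
    data = [2, 4, 5, 7, 899, 54, 32]
    assert quick_select(data, len(data) // 2) == 7  # sorted(data)[3]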
| 106 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
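    # (2 + 1) * 3 in reverse Polish notation; the division branch truncates
    # toward zero, unlike Python's floor division.
    assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
    assert evaluate_postfix(["-7", "2", "/"]) == -3  # -7 // 2 would be -4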
| 266 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 358 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """simple docstring"""

    sample: torch.FloatTensor
class Encoder(nn.Module):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any=3 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : List[str]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE__ : Tuple=(64,) , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[Any]=32 , SCREAMING_SNAKE_CASE__ : List[str]="silu" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , ) -> str:
super().__init__()
lowerCAmelCase__ = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1
        )
lowerCAmelCase__ = None
lowerCAmelCase__ = nn.ModuleList([] )
# down
lowerCAmelCase__ = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = block_out_channels[i]
lowerCAmelCase__ = i == len(SCREAMING_SNAKE_CASE__ ) - 1
lowerCAmelCase__ = get_down_block(
SCREAMING_SNAKE_CASE__ , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE__ , resnet_groups=SCREAMING_SNAKE_CASE__ , attention_head_dim=SCREAMING_SNAKE_CASE__ , temb_channels=SCREAMING_SNAKE_CASE__ , )
self.down_blocks.append(SCREAMING_SNAKE_CASE__ )
# mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )
# out
lowerCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE__ , eps=1e-6 )
lowerCAmelCase__ = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
lowerCAmelCase__ = False
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict ) -> int:
lowerCAmelCase__ = x
lowerCAmelCase__ = self.conv_in(SCREAMING_SNAKE_CASE__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE__ : Optional[int] ):
def custom_forward(*SCREAMING_SNAKE_CASE__ : Dict ):
return module(*SCREAMING_SNAKE_CASE__ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , use_reentrant=SCREAMING_SNAKE_CASE__ )
# middle
lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE__ , use_reentrant=SCREAMING_SNAKE_CASE__ )
else:
for down_block in self.down_blocks:
lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# middle
lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE__ )
else:
# down
for down_block in self.down_blocks:
lowerCAmelCase__ = down_block(SCREAMING_SNAKE_CASE__ )
# middle
lowerCAmelCase__ = self.mid_block(SCREAMING_SNAKE_CASE__ )
# post-process
lowerCAmelCase__ = self.conv_norm_out(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.conv_act(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.conv_out(SCREAMING_SNAKE_CASE__ )
return sample
class Decoder(nn.Module):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : int=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE__ : Tuple=(64,) , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=32 , SCREAMING_SNAKE_CASE__ : List[str]="silu" , SCREAMING_SNAKE_CASE__ : List[str]="group" , ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase__ = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1
        )
lowerCAmelCase__ = None
lowerCAmelCase__ = nn.ModuleList([] )
        temb_channels = in_channels if norm_type == "spatial" else None
# mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, )
# up
lowerCAmelCase__ = list(reversed(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = reversed_block_out_channels[i]
lowerCAmelCase__ = i == len(SCREAMING_SNAKE_CASE__ ) - 1
lowerCAmelCase__ = get_up_block(
SCREAMING_SNAKE_CASE__ , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , prev_output_channel=SCREAMING_SNAKE_CASE__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=SCREAMING_SNAKE_CASE__ , resnet_groups=SCREAMING_SNAKE_CASE__ , attention_head_dim=SCREAMING_SNAKE_CASE__ , temb_channels=SCREAMING_SNAKE_CASE__ , resnet_time_scale_shift=SCREAMING_SNAKE_CASE__ , )
self.up_blocks.append(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = output_channel
# out
if norm_type == "spatial":
lowerCAmelCase__ = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE__ , eps=1e-6 )
lowerCAmelCase__ = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
lowerCAmelCase__ = False
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Dict:
lowerCAmelCase__ = z
lowerCAmelCase__ = self.conv_in(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
def custom_forward(*SCREAMING_SNAKE_CASE__ : List[str] ):
return module(*SCREAMING_SNAKE_CASE__ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , use_reentrant=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = sample.to(SCREAMING_SNAKE_CASE__ )
# up
for up_block in self.up_blocks:
lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , use_reentrant=SCREAMING_SNAKE_CASE__ )
else:
# middle
lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = sample.to(SCREAMING_SNAKE_CASE__ )
# up
for up_block in self.up_blocks:
lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
# middle
lowerCAmelCase__ = self.mid_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = sample.to(SCREAMING_SNAKE_CASE__ )
# up
for up_block in self.up_blocks:
lowerCAmelCase__ = up_block(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# post-process
if latent_embeds is None:
lowerCAmelCase__ = self.conv_norm_out(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase__ = self.conv_norm_out(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.conv_act(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.conv_out(SCREAMING_SNAKE_CASE__ )
return sample
class VectorQuantizer(nn.Module):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]="random" , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=True ) -> List[Any]:
super().__init__()
lowerCAmelCase__ = n_e
lowerCAmelCase__ = vq_embed_dim
lowerCAmelCase__ = beta
lowerCAmelCase__ = legacy
lowerCAmelCase__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowerCAmelCase__ = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowerCAmelCase__ = self.used.shape[0]
lowerCAmelCase__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowerCAmelCase__ = self.re_embed
lowerCAmelCase__ = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
lowerCAmelCase__ = n_e
lowerCAmelCase__ = sane_index_shape
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase__ = inds.shape
assert len(SCREAMING_SNAKE_CASE__ ) > 1
lowerCAmelCase__ = inds.reshape(ishape[0] , -1 )
lowerCAmelCase__ = self.used.to(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = (inds[:, :, None] == used[None, None, ...]).long()
lowerCAmelCase__ = match.argmax(-1 )
lowerCAmelCase__ = match.sum(2 ) < 1
if self.unknown_index == "random":
lowerCAmelCase__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowerCAmelCase__ = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]:
lowerCAmelCase__ = inds.shape
assert len(SCREAMING_SNAKE_CASE__ ) > 1
lowerCAmelCase__ = inds.reshape(ishape[0] , -1 )
lowerCAmelCase__ = self.used.to(SCREAMING_SNAKE_CASE__ )
if self.re_embed > self.used.shape[0]: # extra token
lowerCAmelCase__ = 0 # simply set to zero
lowerCAmelCase__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE__ )
return back.reshape(SCREAMING_SNAKE_CASE__ )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
# reshape z -> (batch, height, width, channel) and flatten
lowerCAmelCase__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowerCAmelCase__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowerCAmelCase__ = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE__ , self.embedding.weight ) , dim=1 )
lowerCAmelCase__ = self.embedding(SCREAMING_SNAKE_CASE__ ).view(z.shape )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
# compute loss for embedding
if not self.legacy:
lowerCAmelCase__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowerCAmelCase__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowerCAmelCase__ = z + (z_q - z).detach()
# reshape back to match original input shape
lowerCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowerCAmelCase__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowerCAmelCase__ = self.remap_to_used(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowerCAmelCase__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def a ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowerCAmelCase__ = indices.reshape(shape[0] , -1 ) # add batch axis
lowerCAmelCase__ = self.unmap_to_all(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowerCAmelCase__ = self.embedding(SCREAMING_SNAKE_CASE__ )
if shape is not None:
lowerCAmelCase__ = z_q.view(SCREAMING_SNAKE_CASE__ )
# reshape back to match original input shape
lowerCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class DiagonalGaussianDistribution(object):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any=False ) -> Optional[int]:
lowerCAmelCase__ = parameters
lowerCAmelCase__ , lowerCAmelCase__ = torch.chunk(SCREAMING_SNAKE_CASE__ , 2 , dim=1 )
lowerCAmelCase__ = torch.clamp(self.logvar , -30.0 , 20.0 )
lowerCAmelCase__ = deterministic
lowerCAmelCase__ = torch.exp(0.5 * self.logvar )
lowerCAmelCase__ = torch.exp(self.logvar )
if self.deterministic:
lowerCAmelCase__ = lowerCAmelCase__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def a ( self : str , SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
lowerCAmelCase__ = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE__ , device=self.parameters.device , dtype=self.parameters.dtype )
lowerCAmelCase__ = self.mean + self.std * sample
return x
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=None ) -> Union[str, Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=[1, 2, 3] ) -> Tuple:
if self.deterministic:
return torch.Tensor([0.0] )
lowerCAmelCase__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> Dict:
return self.mean
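

# Self-contained sketch of the nearest-codebook lookup used in
# VectorQuantizer.forward above (shapes are illustrative, not from the file):
if __name__ == "__main__":
    z_flattened = torch.randn(8, 4)  # 8 latent vectors of dimension 4
    codebook = torch.randn(16, 4)  # 16 codebook entries
    # (z - e)^2 = z^2 + e^2 - 2 e * z, evaluated for every (latent, entry) pair
    min_encoding_indices = torch.argmin(torch.cdist(z_flattened, codebook), dim=1)
    z_q = codebook[min_encoding_indices]  # straight-through estimator is applied afterwards
    assert z_q.shape == z_flattened.shape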
| 221 | 0 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """simple docstring"""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 102 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    '''simple docstring'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 140 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 358 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
}
| 25 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """simple docstring"""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """simple docstring"""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """simple docstring"""
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    """simple docstring"""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """simple docstring"""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
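

# Round-trip check; the key is arbitrary, and decode(encode(x)) recovers the
# prepared (upper-cased, X-padded) form of the input rather than the raw string.
if __name__ == "__main__":
    ciphertext = encode("Hello World", "MONARCHY")
    assert decode(ciphertext, "MONARCHY") == prepare_input("Hello World")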
| 64 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        '''simple docstring'''
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        '''simple docstring'''
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
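

# Standalone sketch of the `truncate` heuristic above: a sampled completion is
# cut at the second top-level `def` (and similarly at a second `print`, or at
# the earliest user-supplied pattern). The completion string is illustrative.
if __name__ == "__main__":
    completion = "def f(x):\n    return x\n\ndef g(x):\n    return x + 1\n"
    defs = list(re.finditer("^def", completion, re.MULTILINE))
    if len(defs) > 1:
        completion = completion[: defs[1].start()]
    assert completion == "def f(x):\n    return x\n\n"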
| 64 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """simple docstring"""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
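    # Small illustrative graph: the 0-weight edge lets 0 -> 1 -> 2 cost only 1,
    # matching the direct 0 -> 2 edge.
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    assert g.get_shortest_path(0, 2) == 1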
| 127 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 127 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        '''simple docstring'''
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        '''simple docstring'''
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ )
@require_torch
@slow
def lowerCamelCase (self ) -> int:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ )
@require_torch
@slow
def lowerCamelCase (self ) -> int:
'''simple docstring'''
from transformers import BertModel
snake_case_ : str = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(__magic_name__ ) )
vocab_file.flush()
snake_case_ : Optional[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
snake_case_ : str = BertModel(BertConfig(vocab_size=len(__magic_name__ ) ) )
model.save_pretrained(__magic_name__ )
self._test_export(__magic_name__ , '''pt''' , 12 , __magic_name__ )
@require_tf
@slow
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
snake_case_ : Tuple = self._test_export(__magic_name__ , '''tf''' , 12 , **__magic_name__ )
snake_case_ : List[str] = quantize(Path(__magic_name__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
snake_case_ : Any = self._test_export(__magic_name__ , '''pt''' , 12 , **__magic_name__ )
snake_case_ : Any = quantize(__magic_name__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__magic_name__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Tuple:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
snake_case_ : List[str] = Path(__magic_name__ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
return path
except Exception as e:
self.fail(__magic_name__ )
@require_torch
@require_tokenizers
@slow
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
from transformers import BertModel
snake_case_ : Optional[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
snake_case_ : int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
from transformers import TFBertModel
snake_case_ : Any = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
snake_case_ : str = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(__magic_name__ , __magic_name__ , '''tf''' )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : Tuple = FeatureExtractionPipeline(__magic_name__ , __magic_name__ )
snake_case_ : Optional[int] = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = infer_shapes(__magic_name__ , __magic_name__ )
# Assert all variables are present
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __magic_name__ )
self.assertSequenceEqual(variable_names[3:] , __magic_name__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
snake_case_ : List[str] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
snake_case_ , snake_case_ : Tuple = ensure_valid_input(FuncContiguousArgs() , __magic_name__ , __magic_name__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__magic_name__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__magic_name__ ) , set(__magic_name__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__magic_name__ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
snake_case_ , snake_case_ : Dict = ensure_valid_input(FuncNonContiguousArgs() , __magic_name__ , __magic_name__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 279 |
def _print_dist(dist, v):
    """simple docstring"""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """simple docstring"""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
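# Non-interactive sketch (bypasses the input() prompts above); the edge
# weights and the expected distance are illustrative:
#
#     INF = float("inf")
#     example = [[0.0, 2.0, INF], [INF, 0.0, 1.0], [4.0, INF, 0.0]]
#     dist, _ = floyd_warshall(example, 3)
#     assert dist[0][2] == 3.0  # 0 -> 1 -> 2 relaxes the missing direct edge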
| 279 | 1 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowercase__ : Dict = logging.getLogger(__name__)
lowercase__ : Union[str, Any] = 'Hello world! cécé herlolip'
lowercase__ : List[str] = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def a__ ( lowercase : Any, lowercase : Optional[int] ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = BertAbsConfig(
temp_dir='''.''', finetune_bert=lowerCamelCase__, large=lowerCamelCase__, share_emb=lowerCamelCase__, use_bert_emb=lowerCamelCase__, encoder='''bert''', max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, )
_UpperCamelCase = torch.load(lowerCamelCase__, lambda lowercase, lowercase : storage )
_UpperCamelCase = AbsSummarizer(lowerCamelCase__, torch.device('''cpu''' ), lowerCamelCase__ )
original.eval()
_UpperCamelCase = BertAbsSummarizer(lowerCamelCase__, torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
_UpperCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
_UpperCamelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase__ )) )
_UpperCamelCase = torch.tensor(lowerCamelCase__ ).unsqueeze(0 )
_UpperCamelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase__ )) )
_UpperCamelCase = torch.tensor(lowerCamelCase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_UpperCamelCase = encoder_input_ids
_UpperCamelCase = decoder_input_ids
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_UpperCamelCase = original(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )[0]
_UpperCamelCase = original.generator(lowerCamelCase__ )
_UpperCamelCase = new_model(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )[0]
_UpperCamelCase = new_model.generator(lowerCamelCase__ )
_UpperCamelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between weights: {:.2f}'''.format(lowerCamelCase__ ) )
_UpperCamelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between weights: {:.2f}'''.format(lowerCamelCase__ ) )
_UpperCamelCase = torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict(), '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
lowercase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
lowercase__ : List[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 354 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
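    # Illustrative values: the highest set bit of 0b1000 sits at position 4.
    assert get_highest_set_bit_position(0) == 0
    assert get_highest_set_bit_position(8) == 4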
| 287 | 0 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        '''simple docstring'''
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: str = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: int = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(UpperCamelCase__) == len(UpperCamelCase__) for x, y in zip(UpperCamelCase__ , processed_features[input_name])))
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="np")
__lowerCAmelCase: Optional[int] = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def lowercase_ ( self : Optional[int])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: int = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs} , tensor_type="pt")
__lowerCAmelCase: str = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def lowercase_ ( self : str)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="tf")
__lowerCAmelCase: int = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
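    # Illustrative arithmetic for the padding-value checks above (assumed numbers, not
    # taken from a real tester): with feature_size = 3 and padding_value = 0.5,
    # padding_vector_sum = 3 * 0.5 = 1.5, so a row padded by 4 frames contributes
    # 1.5 * 4 = 6.0 to the sum over its padded region.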
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
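    # Worked example of the `pad_to_multiple_of` rule exercised above (assumed values):
    # with pad_max_length = 37 and pad_to_multiple_of = 10, rows are padded to
    # (37 // 10 + 1) * 10 = 40, so every padded length becomes a multiple of 10.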
| 217 |
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: Optional[Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: List[str] = 0
while number > 0:
__lowerCAmelCase: Any = number % 1_0
sum_of_digits += last_digit
__lowerCAmelCase: List[Any] = number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def a__ ( __SCREAMING_SNAKE_CASE = 1_0_0 ) -> int:
__lowerCAmelCase: Tuple = factorial(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
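# Worked example (illustrative): factorial(10) = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.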
| 217 | 1 |
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 293 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size to `None` to disable attention slicing
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ):
if isinstance(A , A ):
__snake_case: int = 1
elif isinstance(A , A ):
__snake_case: Optional[Any] = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
__snake_case: Tuple = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case: Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape
__snake_case: Tuple = text_embeddings.repeat(1 , A , 1 )
__snake_case: Dict = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case: List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case: List[str]
if negative_prompt is None:
__snake_case: Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
__snake_case: List[str] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case: str = negative_prompt
__snake_case: Any = text_input_ids.shape[-1]
__snake_case: Dict = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
__snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case: Optional[Any] = uncond_embeddings.shape[1]
__snake_case: str = uncond_embeddings.repeat(A , A , 1 )
__snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case: Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case: Any = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
__snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
__snake_case: Dict = torch.randn(
A , generator=A , device=self.device , dtype=A )
__snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case: Optional[int] = latents_reference.to(self.device )
__snake_case: List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case: List[Any] = 0 if dx < 0 else dx
__snake_case: Dict = 0 if dy < 0 else dy
__snake_case: List[str] = max(-dx , 0 )
__snake_case: int = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case: str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case: int = {}
if accepts_eta:
__snake_case: Optional[Any] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
__snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case: Dict = self.scheduler.scale_model_input(A , A )
# predict the noise residual
__snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case: Any = noise_pred.chunk(2 )
__snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
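# Illustrative: with guidance_scale = 7.5 this update extrapolates beyond the
# text-conditioned prediction, since uncond + 7.5 * (text - uncond) = 7.5 * text - 6.5 * uncond.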
# compute the previous noisy sample x_t -> x_t-1
__snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
__snake_case: Optional[int] = 1 / 0.1_8215 * latents
__snake_case: List[Any] = self.vae.decode(A ).sample
__snake_case: str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
__snake_case , __snake_case: List[str] = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case: Optional[int] = None
if output_type == "pil":
__snake_case: Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 293 | 1 |
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 308 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 308 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def __lowercase ( __lowercase , __lowercase=False ) -> Optional[Any]:
'''simple docstring'''
_A = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
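# Example of a pair produced above (illustrative): for i = 0 the key
# "blocks.0.norm1.weight" is renamed to "vit.encoder.layer.0.layernorm_before.weight".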
def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> List[str]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_A = ''''''
else:
_A = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[
: config.hidden_size, :
]
_A = in_proj_bias[: config.hidden_size]
_A = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_A = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_A = in_proj_weight[
-config.hidden_size :, :
]
_A = in_proj_bias[-config.hidden_size :]
def __lowercase ( __lowercase ) -> List[str]:
'''simple docstring'''
_A = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
'''simple docstring'''
_A = dct.pop(__lowercase )
_A = val
def __lowercase ( ) -> Optional[Any]:
'''simple docstring'''
_A = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def __lowercase ( __lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
_A = ViTConfig()
_A = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_A = True
_A = int(vit_name[-12:-10] )
_A = int(vit_name[-9:-6] )
else:
_A = 1000
_A = '''huggingface/label-files'''
_A = '''imagenet-1k-id2label.json'''
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(__lowercase ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
_A = int(vit_name[-6:-4] )
_A = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_A = 192
_A = 768
_A = 12
_A = 3
elif vit_name[9:].startswith("small" ):
_A = 384
_A = 1536
_A = 12
_A = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_A = 768
_A = 2304
_A = 8
_A = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_A = 1024
_A = 4096
_A = 24
_A = 16
elif vit_name[4:].startswith("huge" ):
_A = 1280
_A = 5120
_A = 32
_A = 16
# load original model from timm
_A = timm.create_model(__lowercase , pretrained=__lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_A = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowercase )
_A = create_rename_keys(__lowercase , __lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_q_k_v(__lowercase , __lowercase , __lowercase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_A = ViTModel(__lowercase ).eval()
else:
_A = ViTForImageClassification(__lowercase ).eval()
model.load_state_dict(__lowercase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_A = DeiTImageProcessor(size=config.image_size )
else:
_A = ViTImageProcessor(size=config.image_size )
_A = image_processor(images=prepare_img() , return_tensors="pt" )
_A = encoding['''pixel_values''']
_A = model(__lowercase )
if base_model:
_A = timm_model.forward_features(__lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowercase , outputs.pooler_output , atol=1e-3 )
else:
_A = timm_model(__lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 357 |
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
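# Illustrative behaviour (assumed token ids): with pad_token_id=0 and decoder_start_token_id=0,
# shift_tokens_right([[5, 6, 7]], 0, 0) -> [[0, 5, 6]], and a -100 label row such as
# [[5, -100, 7]] becomes [[0, 5, 0]] because shifted -100 values are replaced by pad_token_id.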
| 174 | 0 |
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip (toggle) the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
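# Quick demonstration (illustrative), using 0b1010 (decimal 10):
# set_bit(0b1010, 0) -> 11 (0b1011), clear_bit(0b1010, 1) -> 8 (0b1000),
# flip_bit(0b1010, 3) -> 2 (0b0010), is_bit_set(0b1010, 1) -> True, get_bit(0b1010, 2) -> 0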
| 30 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[Any] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = DPTConfig()
if "large" in checkpoint_url:
_UpperCAmelCase : List[Any] = 1024
_UpperCAmelCase : Optional[int] = 4096
_UpperCAmelCase : Tuple = 24
_UpperCAmelCase : List[str] = 16
_UpperCAmelCase : str = [5, 11, 17, 23]
_UpperCAmelCase : Tuple = [256, 512, 1024, 1024]
_UpperCAmelCase : List[str] = (1, 384, 384)
if "ade" in checkpoint_url:
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = 150
_UpperCAmelCase : Tuple = """huggingface/label-files"""
_UpperCAmelCase : int = """ade20k-id2label.json"""
_UpperCAmelCase : List[str] = json.load(open(cached_download(hf_hub_url(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) ) , """r""" ) )
_UpperCAmelCase : List[Any] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
_UpperCAmelCase : Tuple = idalabel
_UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
_UpperCAmelCase : Optional[int] = [1, 150, 480, 480]
return config, expected_shape
def snake_case_ ( lowerCAmelCase_ )-> str:
'''simple docstring'''
_UpperCAmelCase : Tuple = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> Any:
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_UpperCAmelCase : int = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
_UpperCAmelCase : str = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
_UpperCAmelCase : int = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
_UpperCAmelCase : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
_UpperCAmelCase : List[str] = name.replace("""proj""" , """projection""" )
if "blocks" in name:
_UpperCAmelCase : Dict = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_UpperCAmelCase : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
_UpperCAmelCase : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_UpperCAmelCase : Dict = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
_UpperCAmelCase : Dict = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
_UpperCAmelCase : List[Any] = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
_UpperCAmelCase : Optional[int] = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
_UpperCAmelCase : List[str] = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
_UpperCAmelCase : str = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
_UpperCAmelCase : Union[str, Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_UpperCAmelCase : Tuple = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
_UpperCAmelCase : List[Any] = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
_UpperCAmelCase : Tuple = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
_UpperCAmelCase : int = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
_UpperCAmelCase : List[str] = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
_UpperCAmelCase : List[str] = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
_UpperCAmelCase : Tuple = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
_UpperCAmelCase : List[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_UpperCAmelCase : List[str] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
_UpperCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
_UpperCAmelCase : int = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
_UpperCAmelCase : str = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
_UpperCAmelCase : Optional[Any] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
_UpperCAmelCase : Any = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
_UpperCAmelCase : Tuple = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
_UpperCAmelCase : Dict = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
_UpperCAmelCase : List[Any] = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
_UpperCAmelCase : List[Any] = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase : Any = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
_UpperCAmelCase : Union[str, Any] = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Optional[int] = in_proj_weight[: config.hidden_size, :]
_UpperCAmelCase : Optional[Any] = in_proj_bias[: config.hidden_size]
_UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase : Dict = in_proj_bias[-config.hidden_size :]
def snake_case_ ( )-> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase : List[str] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase : List[str] = get_dpt_config(lowerCAmelCase_ )
# load original state_dict from URL
_UpperCAmelCase : Any = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(lowerCAmelCase_ )
# rename keys
for key in state_dict.copy().keys():
_UpperCAmelCase : List[Any] = state_dict.pop(lowerCAmelCase_ )
_UpperCAmelCase : Dict = val
# read in qkv matrices
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ )
# load HuggingFace model
_UpperCAmelCase : Optional[int] = DPTForSemanticSegmentation(lowerCAmelCase_ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
model.eval()
# Check outputs on an image
_UpperCAmelCase : Tuple = 480 if """ade""" in checkpoint_url else 384
_UpperCAmelCase : List[str] = DPTImageProcessor(size=lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : Dict = image_processor(lowerCAmelCase_ , return_tensors="""pt""" )
# forward pass
_UpperCAmelCase : Optional[Any] = model(**lowerCAmelCase_ ).logits if """ade""" in checkpoint_url else model(**lowerCAmelCase_ ).predicted_depth
# Assert logits
_UpperCAmelCase : Optional[int] = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] )
if "ade" in checkpoint_url:
_UpperCAmelCase : str = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] )
assert outputs.shape == torch.Size(lowerCAmelCase_ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCAmelCase_ )
)
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
A_ : List[Any] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 215 | 0 |
def bead_sort(sequence: list) -> list:
    """Bead sort (gravity sort) for a list of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the surplus beads "fall" from the upper rod to the lower one
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
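# Worked example (illustrative): one pass over [5, 4, 3] compares live values against a
# snapshot of the tail: (5, 4) moves one bead -> [4, 5, 3]; then (5, 3) moves two -> [4, 3, 5].
# After len(sequence) passes the beads settle into [3, 4, 5].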
if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 329 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def _split_text ( text : str ):
    """simple docstring"""
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content : str ):
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def __snake_case ( ):
    """simple docstring"""
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4 | 329 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Union[str, Any] = 'instructblip_vision_model'
def __init__(self : Union[str, Any] , __UpperCAmelCase : Optional[int]=1_4_0_8 , __UpperCAmelCase : List[Any]=6_1_4_4 , __UpperCAmelCase : Optional[int]=3_9 , __UpperCAmelCase : Optional[Any]=1_6 , __UpperCAmelCase : Optional[Any]=2_2_4 , __UpperCAmelCase : Optional[Any]=1_4 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Tuple=1E-6 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Tuple=1E-10 , __UpperCAmelCase : int=True , **__UpperCAmelCase : Union[str, Any] , ) -> Dict:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = qkv_bias
@classmethod
def lowercase_ (cls : Union[str, Any] , __UpperCAmelCase : Union[str, os.PathLike] , **__UpperCAmelCase : Tuple ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCAmelCase__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = 'instructblip_qformer'
def __init__(self : str , __UpperCAmelCase : Tuple=3_0_5_2_2 , __UpperCAmelCase : Dict=7_6_8 , __UpperCAmelCase : Any=1_2 , __UpperCAmelCase : List[Any]=1_2 , __UpperCAmelCase : Any=3_0_7_2 , __UpperCAmelCase : List[str]="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=5_1_2 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : List[str]=1E-12 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Union[str, Any]="absolute" , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : Any=1_4_0_8 , **__UpperCAmelCase : List[Any] , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = position_embedding_type
UpperCAmelCase__ = cross_attention_frequency
UpperCAmelCase__ = encoder_hidden_size
@classmethod
def lowercase_ (cls : Optional[Any] , __UpperCAmelCase : Union[str, os.PathLike] , **__UpperCAmelCase : Dict ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCAmelCase__ = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Dict = 'instructblip'
__UpperCAmelCase : Optional[int] = True
def __init__(self : Tuple , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : int=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=3_2 , **__UpperCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
if vision_config is None:
UpperCAmelCase__ = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
UpperCAmelCase__ = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
UpperCAmelCase__ = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
UpperCAmelCase__ = InstructBlipVisionConfig(**__UpperCAmelCase )
UpperCAmelCase__ = InstructBlipQFormerConfig(**__UpperCAmelCase )
UpperCAmelCase__ = text_config["model_type"] if "model_type" in text_config else "opt"
UpperCAmelCase__ = CONFIG_MAPPING[text_model_type](**__UpperCAmelCase )
UpperCAmelCase__ = self.text_config.tie_word_embeddings
UpperCAmelCase__ = self.text_config.is_encoder_decoder
UpperCAmelCase__ = num_query_tokens
UpperCAmelCase__ = self.vision_config.hidden_size
UpperCAmelCase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase__ = 1.0
UpperCAmelCase__ = 0.02
@classmethod
def lowercase_ (cls : List[Any] , __UpperCAmelCase : InstructBlipVisionConfig , __UpperCAmelCase : InstructBlipQFormerConfig , __UpperCAmelCase : PretrainedConfig , **__UpperCAmelCase : List[Any] , ) -> Tuple:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__UpperCAmelCase , )
def lowercase_ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ = self.vision_config.to_dict()
UpperCAmelCase__ = self.qformer_config.to_dict()
UpperCAmelCase__ = self.text_config.to_dict()
UpperCAmelCase__ = self.__class__.model_type
return output
| 65 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=99 , _lowerCAmelCase : List[str]=32 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=37 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=512 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Union[str, Any]=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Optional[int] ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = LlamaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , ):
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = outputs.past_key_values
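        # the cached past_key_values hold each layer's keys/values, so the second forward
        # pass below should reproduce the same hidden states as recomputing the full sequence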
        # create hypothetical next tokens and extend next_input_ids with them
SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE_ = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase_ = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = LlamaModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 'single_label_classification'
SCREAMING_SNAKE_CASE_ = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 'multi_label_classification'
SCREAMING_SNAKE_CASE_ = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowerCAmelCase_ ( self : int ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def lowerCAmelCase_ ( self : str , scaling_type : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = ids_tensor([1, 10] , config.vocab_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
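        # long_input is 1.5x max_position_embeddings, i.e. beyond the original context
        # window, which is exactly the case the scaled RoPE variants are meant to handle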
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
original_model.to(_lowerCAmelCase )
original_model.eval()
SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state
SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase )
scaled_model.to(_lowerCAmelCase )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state
SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_lowerCAmelCase , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_lowerCAmelCase )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , max_new_tokens=64 , top_p=_lowerCAmelCase , temperature=1 , do_sample=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) | 225 | 0 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling ( arr : np.ndarray , size : int , stride : int ) -> np.ndarray:
'''simple docstring'''
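    # Example: a 4x4 matrix [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    # pooled with size=2, stride=2 yields [[6, 8], [14, 16]]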
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # store the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling ( arr : np.ndarray , size : int , stride : int ) -> np.ndarray:
'''simple docstring'''
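    # Example: the same 4x4 matrix [[1..16]] with size=2, stride=2 averages to
    # [[3, 5], [11, 13]], since int() truncates 3.5, 5.5, 11.5 and 13.5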
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # store the average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 156 |
'''simple docstring'''
def solution ( limit : int = 100_0000 ) -> int:
'''simple docstring'''
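    # With x, y, z = a + d, a, a - d a decreasing arithmetic progression,
    # x^2 - y^2 - z^2 = a * (4d - a) = n; so for each a (first_term) and each multiple
    # n of a, d = (a + n / a) / 4 must be an integer with a > d (z > 0) and a < 4d (n > 0).
    # The answer counts n below the limit with exactly ten such solutions (Project Euler 135).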
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # common_difference (= 4d) must be divisible by 4 for d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # z > 0 requires a > d, and n > 0 requires a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F"{solution() = }")
| 156 | 1 |
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution ( num_picked : int = 20 ) -> str:
"""simple docstring"""
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
return f"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
| 102 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=a_ , text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , )
def SCREAMING_SNAKE_CASE (self , a_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.enable_attention_slicing(a_ )
@torch.no_grad()
def __call__(self , a_ , a_ = 5_12 , a_ = 5_12 , a_ = 50 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , a_ = None , **a_ , ):
'''simple docstring'''
if isinstance(a_ , a_ ):
__snake_case : Any = 1
elif isinstance(a_ , a_ ):
__snake_case : Any = len(a_ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a_ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a_ , a_ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a_ )}.""" )
# get prompt text embeddings
__snake_case : int = self.tokenizer(
a_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__snake_case : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__snake_case : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Union[str, Any] = text_embeddings.shape
__snake_case : Optional[int] = text_embeddings.repeat(1 , a_ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , a_ , -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
__snake_case : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : List[Any] = ['''''']
elif type(a_ ) is not type(a_ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a_ )} !="""
f""" {type(a_ )}.""" )
elif isinstance(a_ , a_ ):
__snake_case : List[str] = [negative_prompt]
elif batch_size != len(a_ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a_ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
__snake_case : Optional[int] = negative_prompt
__snake_case : Optional[int] = text_input_ids.shape[-1]
__snake_case : List[Any] = self.tokenizer(
a_ , padding='''max_length''' , max_length=a_ , truncation=a_ , return_tensors='''pt''' , )
__snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : str = uncond_embeddings.shape[1]
__snake_case : int = uncond_embeddings.repeat(a_ , a_ , 1 )
__snake_case : int = uncond_embeddings.view(batch_size * num_images_per_prompt , a_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Union[str, Any] = torch.randn(
a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to(self.device )
__snake_case : Tuple = torch.randn(a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to(
self.device )
else:
__snake_case : Dict = torch.randn(
a_ , generator=a_ , device=self.device , dtype=a_ )
__snake_case : Dict = torch.randn(a_ , generator=a_ , device=self.device , dtype=a_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__snake_case : Union[str, Any] = latents_reference.to(self.device )
__snake_case : Dict = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case : int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case : int = 0 if dx < 0 else dx
__snake_case : Union[str, Any] = 0 if dy < 0 else dy
__snake_case : str = max(-dx , 0 )
__snake_case : Tuple = max(-dy , 0 )
__snake_case : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(a_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : Tuple = {}
if accepts_eta:
__snake_case : List[str] = eta
for i, t in enumerate(self.progress_bar(a_ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Tuple = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
__snake_case : int = self.unet(a_ , a_ , encoder_hidden_states=a_ ).sample
# perform guidance
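            # noise = uncond + guidance_scale * (text - uncond): a scale above 1 pushes
            # the sample toward the text-conditioned prediction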
if do_classifier_free_guidance:
__snake_case , __snake_case : Tuple = noise_pred.chunk(2 )
__snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a_ , a_ , a_ )
__snake_case : Union[str, Any] = 1 / 0.1_8215 * latents
__snake_case : Optional[Any] = self.vae.decode(a_ ).sample
__snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case : Optional[int] = self.feature_extractor(self.numpy_to_pil(a_ ) , return_tensors='''pt''' ).to(
self.device )
__snake_case , __snake_case : List[Any] = self.safety_checker(
images=a_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case : Union[str, Any] = None
if output_type == "pil":
__snake_case : Union[str, Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=a_ , nsfw_content_detected=a_ )
| 102 | 1 |
'''simple docstring'''
def binary_or ( a : int , b : int ) -> str:
'''simple docstring'''
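    # e.g. a=25 (0b11001) and b=32 (0b100000): zero-fill both to the same width and
    # OR the digits pairwise, giving "0b111001" (decimal 57)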
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
_UpperCAmelCase = str(bin(__lowercase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(__lowercase ) )[2:]
_UpperCAmelCase = max(len(__lowercase ) , len(__lowercase ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(__lowercase ) , b_binary.zfill(__lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 156 |
'''simple docstring'''
def bfs ( graph : list , s : int , t : int , parent : list ) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson ( graph : list , source : int , sink : int ) -> int:
'''simple docstring'''
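    # Edmonds-Karp: BFS finds an augmenting path, the bottleneck capacity along it is
    # pushed, and residual capacities are updated until no augmenting path remains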
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # find the bottleneck (minimum residual capacity) along the chosen path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
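# for the example network above, the maximum flow from node 0 to node 5 is 23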
print(ford_fulkerson(graph, source, sink))
| 156 | 1 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm ( number : int ) -> int:
if number < 0:
raise ValueError("""the value of input must not be negative""" )
    result = 0
while number:
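        # number & (number - 1) clears the lowest set bit, so the loop body
        # executes exactly once per set bit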
number &= number - 1
result += 1
return result
def get_set_bits_count_using_modulo_operator ( number : int ) -> int:
if number < 0:
raise ValueError("""the value of input must not be negative""" )
    result = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
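# sanity check: 25 == 0b11001, so both functions return 3 for it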
def benchmark ( ) -> None:
    def do_benchmark(number: int ) -> None:
        setup = """import __main__ as z"""
        print(f'''Benchmark when {number = }:''' )
        print(f'''{get_set_bits_count_using_modulo_operator(number ) = }''' )
        timing = timeit(f"""z.get_set_bits_count_using_modulo_operator({number})""" , setup=setup )
        print(f'''timeit() runs in {timing} seconds''' )
        print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }''' )
        timing = timeit(
            f"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""" , setup=setup , )
        print(f'''timeit() runs in {timing} seconds''' )
for number in (2_5, 3_7, 5_8, 0):
        do_benchmark(number )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 163 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ( ):
UpperCAmelCase__ : Any = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
UpperCAmelCase__ : Optional[int] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("""RGB""" )
return image
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : int = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Union[str, Any] = dct.pop(UpperCamelCase__ )
UpperCAmelCase__ : Dict = val
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase__ : List[Any] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
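        # the original checkpoint keeps no bias for the key projection, so the middle
        # third of the fused qkv bias is filled with zeros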
UpperCAmelCase__ : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(UpperCamelCase__ , requires_grad=UpperCamelCase__ ), v_bias) )
UpperCAmelCase__ : Tuple = qkv_bias
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Optional[int] = 3_6_4 if """coco""" in model_name else 2_2_4
UpperCAmelCase__ : int = BlipaVisionConfig(image_size=UpperCamelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase__ : str = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=UpperCamelCase__ ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase__ : List[Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=UpperCamelCase__ ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase__ : Dict = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase__ : Optional[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
UpperCAmelCase__ : int = BlipaConfig(vision_config=UpperCamelCase__ , text_config=UpperCamelCase__ )
return config, image_size
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False ):
UpperCAmelCase__ : Tuple = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
UpperCAmelCase__ : int = tokenizer("""\n""" , add_special_tokens=UpperCamelCase__ ).input_ids[0]
UpperCAmelCase__ , UpperCAmelCase__ : Any = get_blipa_config(UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
UpperCAmelCase__ : List[str] = BlipaForConditionalGeneration(UpperCamelCase__ ).eval()
UpperCAmelCase__ : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
UpperCAmelCase__ : Union[str, Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = load_model_and_preprocess(
name=UpperCamelCase__ , model_type=UpperCamelCase__ , is_eval=UpperCamelCase__ , device=UpperCamelCase__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
UpperCAmelCase__ : List[Any] = original_model.state_dict()
UpperCAmelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase__ : str = state_dict.pop(UpperCamelCase__ )
if key.startswith("""Qformer.bert""" ):
UpperCAmelCase__ : Any = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
UpperCAmelCase__ : Dict = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
UpperCAmelCase__ : Any = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
UpperCAmelCase__ : int = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
UpperCAmelCase__ : Optional[int] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
UpperCAmelCase__ : int = key.replace("""t5""" , """language""" )
UpperCAmelCase__ : List[str] = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ : Any = hf_model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase__ : List[Any] = load_demo_image()
UpperCAmelCase__ : Any = vis_processors["""eval"""](UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
UpperCAmelCase__ : Any = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(UpperCamelCase__ )
# create processor
UpperCAmelCase__ : int = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ )
UpperCAmelCase__ : Any = BlipaProcessor(image_processor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
UpperCAmelCase__ : Tuple = processor(images=UpperCamelCase__ , return_tensors="""pt""" ).pixel_values.to(UpperCamelCase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
hf_model.to(UpperCamelCase__ )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase__ : List[str] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
UpperCAmelCase__ : Union[str, Any] = hf_model(UpperCamelCase__ , UpperCamelCase__ ).logits
else:
UpperCAmelCase__ : List[str] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
UpperCAmelCase__ : Any = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase__ : Optional[Any] = hf_model(UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase__ : Any = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=UpperCamelCase__ )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase__ : int = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=UpperCamelCase__ )
else:
# cast to same type
UpperCAmelCase__ : int = logits.dtype
assert torch.allclose(original_logits.to(UpperCamelCase__ ) , UpperCamelCase__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
UpperCAmelCase__ : Union[str, Any] = """"""
UpperCAmelCase__ : Dict = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ).input_ids.to(UpperCamelCase__ )
UpperCAmelCase__ : int = original_model.generate({"""image""": original_pixel_values} )
UpperCAmelCase__ : Optional[Any] = hf_model.generate(
UpperCamelCase__ , UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , UpperCamelCase__ )
UpperCAmelCase__ : List[Any] = input_ids.shape[1]
UpperCAmelCase__ : Optional[Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCamelCase__ )
UpperCAmelCase__ : Any = [text.strip() for text in output_text]
print("""HF generation:""" , UpperCamelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
__A =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
    help='Name of the BLIP-2 model checkpoint to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__A =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 163 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv2_fast'''] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv2'''] = ['''LayoutLMv2FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv2'''] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv2'''] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
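# A minimal sketch of the deferred-import idea behind _LazyModule, using only
# the standard library; the module name below is illustrative.
def lazy_import_sketch(module_name: str = "json"):
    import importlib
    import sys

    if module_name in sys.modules:
        return sys.modules[module_name]  # already imported, reuse the cached module
    return importlib.import_module(module_name)  # pay the import cost only on first use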
| 369 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn dataset bunch into (features, target).

    >>> data_handling({"data": [[5.1, 3.5]], "target": [0]})
    ([[5.1, 3.5]], [0])
    """
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """Fit an XGBoost classifier on the given features and target."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """Train an XGBoost classifier on the Iris dataset and plot its confusion matrix."""
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = data["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
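# A short sketch of the same split/fit flow when a plain accuracy number is
# wanted instead of a plot; it reuses data_handling() and xgboost() above.
def accuracy_sketch() -> float:
    from sklearn.metrics import accuracy_score

    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    classifier = xgboost(x_train, y_train)
    return accuracy_score(y_test, classifier.predict(x_test))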
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 79 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (ported from fairseq)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
'''simple docstring'''
return self.indices == other.indices
    def __getitem__(self, idx):
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.symbols )
    def __contains__(self, sym):
'''simple docstring'''
return sym in self.indices
@classmethod
    def load(cls, f):
        """Load a Dictionary from a text file of '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary and return its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        """Hook for subclasses; the base dictionary file has no meta lines."""
        return 0
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
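# A tiny sanity check for rewrite_dict_keys(): BPE continuation markers ("@@")
# are stripped, word-final tokens gain "</w>", and special tokens are restored
# unchanged. The vocabulary below is made up for illustration.
def rewrite_dict_keys_demo():
    vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5}
    rewritten = rewrite_dict_keys(vocab)
    assert rewritten["hel"] == 4  # "@@" suffix removed
    assert rewritten["lo</w>"] == 5  # word-final marker added
    assert rewritten["<s>"] == 0  # special token kept as-is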
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1e-1_2,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1_0_2_4,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase_ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 266 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the input into a batch of videos (a list of lists of frames)."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
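# A quick demonstration of make_batched()'s normalization rules on numpy
# frames: a single image, a list of frames, and an already-batched input all
# come out as a list of videos (each video being a list of frames).
def make_batched_demo():
    frame = np.zeros((8, 8, 3), dtype=np.uint8)
    single = make_batched(frame)
    assert len(single) == 1 and single[0][0] is frame  # image -> one single-frame video
    video = make_batched([frame, frame])
    assert len(video) == 1 and len(video[0]) == 2  # list of frames -> one video
    batch = make_batched([[frame], [frame]])
    assert len(batch) == 2  # already batched -> returned unchanged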
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
if "shortest_edge" in size:
A__ = get_resize_output_image_size(__UpperCAmelCase ,size['shortest_edge'] ,default_to_square=__UpperCAmelCase )
elif "height" in size and "width" in size:
A__ = (size['height'], size['width'])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCAmelCase ,size=(size['height'], size['width']) ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> Optional[Any]:
A__ = image.astype(np.floataa )
if offset:
A__ = image - (scale / 2)
return rescale(__UpperCAmelCase ,scale=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
return normalize(__UpperCAmelCase ,mean=__UpperCAmelCase ,std=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
A__ = to_numpy_array(__UpperCAmelCase )
if do_resize:
A__ = self.resize(image=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase )
if do_center_crop:
A__ = self.center_crop(__UpperCAmelCase ,size=__UpperCAmelCase )
if do_rescale:
A__ = self.rescale(image=__UpperCAmelCase ,scale=__UpperCAmelCase ,offset=__UpperCAmelCase )
if do_normalize:
A__ = self.normalize(image=__UpperCAmelCase ,mean=__UpperCAmelCase ,std=__UpperCAmelCase )
A__ = to_channel_dimension_format(__UpperCAmelCase ,__UpperCAmelCase )
return image
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,**__UpperCAmelCase ,) -> PIL.Image.Image:
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = offset if offset is not None else self.offset
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(__UpperCAmelCase ,param_name='crop_size' )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
A__ = make_batched(__UpperCAmelCase )
A__ = [
[
self._preprocess_image(
image=__UpperCAmelCase ,do_resize=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ,do_center_crop=__UpperCAmelCase ,crop_size=__UpperCAmelCase ,do_rescale=__UpperCAmelCase ,rescale_factor=__UpperCAmelCase ,offset=__UpperCAmelCase ,do_normalize=__UpperCAmelCase ,image_mean=__UpperCAmelCase ,image_std=__UpperCAmelCase ,data_format=__UpperCAmelCase ,)
for img in video
]
for video in videos
]
A__ = {'pixel_values': videos}
return BatchFeature(data=__UpperCAmelCase ,tensor_type=__UpperCAmelCase )
| 221 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
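# A minimal sketch of the register_subcommand contract the commands above
# follow: each command attaches its own subparser and binds a factory to the
# "func" attribute that main() dispatches on. EchoCommand is illustrative.
class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo", help="print a message")
        sub.add_argument("message")
        sub.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)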
if __name__ == "__main__":
main()
| 357 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
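# A short sketch of loading the consolidated checkpoint back again; assuming
# the layout written above, RagTokenizer is expected to pick up the
# generator_tokenizer/ and question_encoder_tokenizer/ subfolders.
def reload_sketch(dest_dir: Path, model_type: str):
    from transformers import RagTokenizer

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    model = model_class.from_pretrained(dest_dir)
    tokenizer = RagTokenizer.from_pretrained(dest_dir)
    return model, tokenizer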
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
UpperCamelCase = parser.parse_args()
UpperCamelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 65 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
"""simple docstring"""
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-config' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
"""simple docstring"""
    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}.")
def snake_case__ ( self : Optional[Any] )-> Optional[int]:
'''simple docstring'''
with self.assertRaises(lowercase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder',subfolder='bert' )
self.assertIsNotNone(lowercase_ )
def snake_case__ ( self : str )-> Optional[int]:
'''simple docstring'''
A__ = mock.Mock()
A__ = 5_0_0
A__ = {}
A__ = HTTPError
A__ = {}
# Download this model to make sure it's in the cache.
A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=lowercase_ ) as mock_head:
A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case__ ( self : int )-> Tuple:
'''simple docstring'''
A__ = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def snake_case__ ( self : str )-> Any:
'''simple docstring'''
A__ = AutoConfig.from_pretrained('bert-base-cased' )
A__ = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowercase_ )
A__ = 2
json.dump(configuration.to_dict(),open(os.path.join(lowercase_,'config.4.0.0.json' ),'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
A__ = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(new_configuration.hidden_size,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
A__ = ['config.42.0.0.json']
A__ = 7_6_8
configuration.save_pretrained(lowercase_ )
shutil.move(os.path.join(lowercase_,'config.4.0.0.json' ),os.path.join(lowercase_,'config.42.0.0.json' ) )
A__ = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(new_configuration.hidden_size,7_6_8 )
def snake_case__ ( self : Dict )-> List[Any]:
'''simple docstring'''
A__ = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
A__ = 'v4.0.0'
A__ , A__ = new_transformers.models.auto.AutoConfig.from_pretrained(
lowercase_,return_unused_kwargs=lowercase_ )
self.assertEqual(new_configuration.hidden_size,2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowercase_,{} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
A__ = 'v3.0.0'
A__ = old_transformers.models.auto.AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(old_configuration.hidden_size,7_6_8 )
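# A compact, standalone illustration of update_from_string()'s parsing, which
# the test above exercises: each value is cast to the type of the existing
# attribute (int/float/bool/str) before being set.
def update_from_string_demo():
    config = GPTaConfig()
    config.update_from_string("n_embd=512,resid_pdrop=0.25,scale_attn_weights=false")
    assert config.n_embd == 512
    assert config.resid_pdrop == 0.25
    assert not config.scale_attn_weights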
| 7 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 25 | 0 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Helper that builds tiny Lilt configs and inputs for the tests below."""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2,
                 num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device)

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
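# A vectorized take on the bbox-legality fix-up in prepare_config_and_inputs
# above: sorting each (x0, x1) and (y0, y1) pair replaces the Python loops.
def normalize_boxes(bbox: torch.Tensor) -> torch.Tensor:
    x = bbox[..., 0::2].sort(dim=-1).values  # (x0, x1) in ascending order
    y = bbox[..., 1::2].sort(dim=-1).values  # (y0, y1) in ascending order
    return torch.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], dim=-1)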
| 52 |
'''simple docstring'''
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3)
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h))
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))
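# A quick property check of the sort above against Python's built-in sorted().
def stooge_sort_check():
    import random

    data = [random.randint(-50, 50) for _ in range(20)]
    assert stooge_sort(list(data)) == sorted(data)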
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
| 52 | 1 |
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions)
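# A brute-force cross-check of remove_digit(): deleting digit i by string
# slicing must yield the same maximum for multi-digit inputs.
def remove_digit_bruteforce(num: int) -> int:
    digits = str(abs(num))
    return max(int(digits[:i] + digits[i + 1 :]) for i in range(len(digits)))


assert all(remove_digit(n) == remove_digit_bruteforce(n) for n in (152, 6385, 24))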
if __name__ == "__main__":
__import__("doctest").testmod()
| 127 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 127 | 1 |
# Imports
import numpy as np
class lowerCAmelCase_ :
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def snake_case_ ( self, SCREAMING_SNAKE_CASE_="", SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None ) -> Dict:
self.set_matricies(red=UpperCamelCase_, green=UpperCamelCase_, blue=UpperCamelCase_, red_edge=UpperCamelCase_, nir=UpperCamelCase_ )
UpperCamelCase : Dict = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def snake_case_ ( self ) -> Any:
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def snake_case_ ( self ) -> Union[str, Any]:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def snake_case_ ( self ) -> Union[str, Any]:
return self.nir * (self.red / (self.green**2))
def snake_case_ ( self ) -> Tuple:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def snake_case_ ( self ) -> Dict:
return (self.nir - self.red) / (self.nir + self.red)
def snake_case_ ( self ) -> List[Any]:
return (self.nir - self.blue) / (self.nir + self.blue)
def snake_case_ ( self ) -> Any:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def snake_case_ ( self ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green)
def snake_case_ ( self ) -> Union[str, Any]:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def snake_case_ ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def snake_case_ ( self ) -> str:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def snake_case_ ( self ) -> str:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=0.08, SCREAMING_SNAKE_CASE_=1.22, SCREAMING_SNAKE_CASE_=0.03 ) -> Optional[int]:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def snake_case_ ( self ) -> Union[str, Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def snake_case_ ( self ) -> Optional[Any]:
return (self.nir / self.green) - 1
def snake_case_ ( self ) -> Tuple:
return (self.nir / self.redEdge) - 1
def snake_case_ ( self ) -> Tuple:
return (self.red - self.blue) / self.red
def snake_case_ ( self ) -> Any:
UpperCamelCase : int = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def snake_case_ ( self ) -> Tuple:
return self.nir - self.green
def snake_case_ ( self ) -> Optional[int]:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Any = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=0.16 ) -> str:
return (self.nir - self.green) / (self.nir + self.green + y)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=0.5 ) -> Union[str, Any]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def snake_case_ ( self ) -> Tuple:
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
return (self.nir - b) / (a * self.red)
def snake_case_ ( self ) -> Tuple:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def snake_case_ ( self ) -> List[str]:
return (self.red + self.green + self.blue) / 30.5
def snake_case_ ( self ) -> Any:
return self.nir / self.red
def snake_case_ ( self ) -> Optional[Any]:
return (self.rvi() - 1) / (self.rvi() + 1)
def snake_case_ ( self ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def snake_case_ ( self ) -> Optional[Any]:
return self.green / (self.nir + self.red + self.green)
def snake_case_ ( self ) -> Any:
return self.nir / (self.nir + self.red + self.green)
def snake_case_ ( self ) -> Optional[int]:
return self.red / (self.nir + self.red + self.green)
def snake_case_ ( self ) -> Optional[int]:
return (self.green - self.red) / (self.green + self.red)
def snake_case_ ( self ) -> Tuple:
return (self.red - self.green) / (self.red + self.green)
def snake_case_ ( self ) -> Any:
UpperCamelCase : str = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
UpperCamelCase : Tuple = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def snake_case_ ( self ) -> List[Any]:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def snake_case_ ( self ) -> Optional[int]:
return self.nir / self.red
def snake_case_ ( self ) -> Union[str, Any]:
return (self.ndvi() + 0.5) ** (1 / 2)
def snake_case_ ( self ) -> Optional[Any]:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
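# A standalone NDVI example on synthetic bands; it mirrors the ndvi() formula
# in the class above without depending on the class itself.
def ndvi_demo():
    nir = np.array([0.6, 0.7, 0.8])
    red = np.array([0.1, 0.2, 0.3])
    ndvi = (nir - red) / (nir + red)
    assert ndvi.shape == red.shape and np.all(ndvi > 0)
    return ndvi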
| 357 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
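        # the key bias is not a trained parameter in the original EVA-ViT attention,
        # so a zero vector is spliced in between q_bias and v_bias to form the fused qkv bias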
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False ), v_bias) )
        state_dict[F"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    name, model_type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...' )
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device )
    original_model.eval()
    print('Done!' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('Qformer.bert' ):
            key = key.replace('Qformer.bert', 'qformer' )
        if "attention.self" in key:
            key = key.replace('self', 'attention' )
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection' )
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection' )
        if key.startswith('opt' ):
            key = key.replace('opt', 'language' )
        if key.startswith('t5' ):
            key = key.replace('t5', 'language' )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['\n'], return_tensors='pt' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer )
    pixel_values = processor(images=image, return_tensors='pt' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
            logits = hf_model(pixel_values, input_ids ).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100 )
            logits = hf_model(pixel_values, input_ids, labels=labels ).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3] )
    print('First values of HF logits:', logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4 )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ), logits, atol=1E-2 )
    print('Looks ok!' )
    print('Generating a caption...' )
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt' ).input_ids.to(device )
    original_outputs = original_model.generate({'image': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print('Original generation:', original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F"""nielsr/{model_name}""" )
        hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 103 | 0 |
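The conversion above hinges on a simple rename-keys pass: collect (src, dest) pairs, then pop each old key out of the checkpoint and re-insert its tensor under the new name. A minimal, self-contained sketch of that mechanism (the helper name and toy values are invented for illustration):

def demo_rename_state_dict(state_dict: dict, rename_keys: list) -> dict:
    # pop the value stored under the old key and re-insert it under the new key
    for src, dest in rename_keys:
        state_dict[dest] = state_dict.pop(src)
    return state_dict

demo = {"visual_encoder.cls_token": 1, "ln_vision.weight": 2}
pairs = [
    ("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"),
    ("ln_vision.weight", "vision_model.post_layernorm.weight"),
]
assert demo_rename_state_dict(demo, pairs) == {
    "vision_model.embeddings.class_embedding": 1,
    "vision_model.post_layernorm.weight": 2,
}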
"""simple docstring"""
def _A ( UpperCamelCase_ : Optional[int]) -> int:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the bridges of an undirected graph as (u, v) pairs with u < v."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
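A quick sanity check on the bridge finder above: every edge of a path graph is a bridge, while a cycle has none. This usage sketch assumes compute_bridges is importable as defined above; bridges are collected in DFS order, hence the sorted().

path_graph = {0: [1], 1: [0, 2], 2: [1]}
cycle_graph = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
assert sorted(compute_bridges(path_graph)) == [(0, 1), (1, 2)]
assert compute_bridges(cycle_graph) == []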
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 287 | 0 |
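For intuition, Pearson's r is just the covariance of the two samples normalized by the product of their standard deviations, r = cov(x, y) / (std(x) * std(y)). A from-scratch numpy sketch (not part of the metric class above) reproducing the docstring example:

import numpy as np

def pearson_r(x, y):
    x, y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)
    xc, yc = x - x.mean(), y - y.mean()  # center both samples
    return float((xc * yc).sum() / np.sqrt((xc**2).sum() * (yc**2).sum()))

print(round(pearson_r([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5]), 2))  # -0.74, matching Example 1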
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 352 |
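The try/except above is the usual optional-dependency guard: when torch or a recent enough transformers is missing, the real pipelines are swapped for dummy objects, so importing the package still works and the error only surfaces when a pipeline is actually instantiated. A stripped-down sketch of that dummy-object idea (class name invented):

class _DummyUnCLIPPipeline:
    """Stand-in that defers the dependency error until the class is actually used."""

    def __init__(self, *args, **kwargs):
        raise ImportError("This pipeline requires torch and transformers>=4.25.0")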
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 12 | 0 |
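The ARROW_KEY_FLAG trick above shifts arrow-key codes out of the 0-255 single-byte range so they can never collide with an ordinary character:

ARROW_KEY_FLAG = 1 << 8
up = 65 + ARROW_KEY_FLAG                 # 321
assert up - ARROW_KEY_FLAG == ord("A")   # 65 is the raw byte from the escape sequence
assert up > 255                          # so it is never mistaken for printable input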
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with corners marked and the list of corner positions."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the configured constant instead of a hardcoded 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # response threshold; can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 275 |
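The corner response computed above is Harris's R = det(M) - k * trace(M)^2, where M is the 2x2 structure tensor [[wxx, wxy], [wxy, wyy]] summed over the window. A standalone check of that formula on made-up window sums:

wxx, wyy, wxy, k = 4.0, 3.0, 1.0, 0.04
det = wxx * wyy - wxy**2      # 11.0
trace = wxx + wyy             # 7.0
r = det - k * trace**2        # 11.0 - 0.04 * 49.0 = 9.04
assert abs(r - 9.04) < 1e-9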
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""
    The tree used by main() below:
        1
       / \
      2   3
     / \
    4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Level-order traversal that alternates direction on every level."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 332 | 0 |
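The zigzag above rescans the tree once per level; an alternative single-pass version keeps one deque and alternates which end it pops from. Sketch only, reusing Node and make_tree from the module above:

from collections import deque

def zigzag_iterative(root):
    if root is None:
        return []
    output, dq, left_to_right = [], deque([root]), True
    while dq:
        level = []
        for _ in range(len(dq)):
            if left_to_right:
                node = dq.popleft()
                level.append(node.data)
                if node.left:
                    dq.append(node.left)
                if node.right:
                    dq.append(node.right)
            else:
                node = dq.pop()
                level.append(node.data)
                if node.right:
                    dq.appendleft(node.right)
                if node.left:
                    dq.appendleft(node.left)
        output.append(level)
        left_to_right = not left_to_right
    return output

assert zigzag_iterative(make_tree()) == [[1], [3, 2], [4, 5]]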
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Any = self.get_auto_remove_tmp_dir()
A_ : Tuple = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(snake_case , "argv" , snake_case ):
run_flax_glue.main()
A_ : Any = get_results(snake_case )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Any = self.get_auto_remove_tmp_dir()
A_ : Tuple = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(snake_case , "argv" , snake_case ):
run_clm_flax.main()
A_ : str = get_results(snake_case )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Dict = self.get_auto_remove_tmp_dir()
A_ : Any = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(snake_case , "argv" , snake_case ):
run_summarization_flax.main()
A_ : str = get_results(snake_case , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Optional[int] = self.get_auto_remove_tmp_dir()
A_ : Dict = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(snake_case , "argv" , snake_case ):
run_mlm_flax.main()
A_ : str = get_results(snake_case )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.get_auto_remove_tmp_dir()
A_ : List[Any] = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(snake_case , "argv" , snake_case ):
run_ta_mlm_flax.main()
A_ : Optional[int] = get_results(snake_case )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Dict = 7 if get_gpu_count() > 1 else 2
A_ : Any = self.get_auto_remove_tmp_dir()
A_ : str = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(snake_case , "argv" , snake_case ):
run_flax_ner.main()
A_ : Union[str, Any] = get_results(snake_case )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[int] = self.get_auto_remove_tmp_dir()
A_ : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(snake_case , "argv" , snake_case ):
run_qa.main()
A_ : Tuple = get_results(snake_case )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 70 |
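The tests above drive each example script by patching sys.argv, which is a handy way to exercise any argparse-based main() in-process. A minimal standalone sketch of the trick:

import sys
from unittest.mock import patch

def fake_main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--lr", type=float)
    return parser.parse_args().lr  # argparse reads the patched sys.argv

with patch.object(sys, "argv", ["prog", "--lr", "3e-4"]):
    assert fake_main() == 3e-4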
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all contiguous subarrays (Kadane's algorithm)."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_lowerCAmelCase : List[Any] = int(input('''Enter number of elements : ''').strip())
_lowerCAmelCase : Dict = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 70 | 1 |
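Kadane's recurrence in isolation: best(i) = max(nums[i], best(i-1) + nums[i]), and the answer is the maximum over all i. A direct transcription for comparison with the version above:

def kadane(nums):
    best_ending_here = best_overall = nums[0]
    for num in nums[1:]:
        best_ending_here = max(num, best_ending_here + num)
        best_overall = max(best_overall, best_ending_here)
    return best_overall

assert kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # the subarray [4, -1, 2, 1]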
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30_000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16_384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 48 |
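The separate embedding_size above is ALBERT's factorized embedding: the vocabulary projects into a small embedding_size and a second matrix maps up to hidden_size, instead of one vocab-by-hidden table. Quick arithmetic with the defaults from the config above:

vocab, emb, hidden = 30_000, 128, 4_096
factorized = vocab * emb + emb * hidden   # 4,364,288 parameters
direct = vocab * hidden                   # 122,880,000 parameters
assert factorized < direct // 25          # over 25x fewer embedding parameters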
"""TER (Translation Edit Rate) metric, wrapping the sacrebleu implementation."""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=["https://github.com/jhclark/tercom"],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized=False,
        ignore_punct=False,
        support_zh_ja_chars=False,
        case_sensitive=False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 174 | 0 |
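TER itself is just the edit count divided by the average reference length, scaled to a percentage; the first docstring example's score can be reproduced from its reported counts:

num_edits, ref_length = 15, 10.0
score = num_edits / ref_length * 100
assert score == 150.0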
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'feature_size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'sampling_rate' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'padding_value' ) )
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) for x, y in zip(_UpperCAmelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase )
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase )
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase )
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
SCREAMING_SNAKE_CASE = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __A ( self , lowerCAmelCase__=False ) -> List[Any]:
def _inputs_have_equal_length(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = len(input[0] )
for input_slice in input[1:]:
if len(_UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase__ , lowerCAmelCase__ ):
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
if not np.allclose(np.asarray(_UpperCAmelCase ) , np.asarray(_UpperCAmelCase ) , atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE = self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE = self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE = self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding=_UpperCAmelCase )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='longest' )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='longest' , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase , return_tensors='np' , )
SCREAMING_SNAKE_CASE = input_a[input_name]
self.assertTrue(all(len(_UpperCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
SCREAMING_SNAKE_CASE = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def __A ( self , lowerCAmelCase__=False ) -> str:
def _inputs_have_equal_length(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = len(input[0] )
for input_slice in input[1:]:
if len(_UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase__ , lowerCAmelCase__ ):
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
if not np.allclose(np.asarray(_UpperCAmelCase ) , np.asarray(_UpperCAmelCase ) , atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_UpperCAmelCase )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
SCREAMING_SNAKE_CASE = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
# truncate to middle
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_UpperCAmelCase , return_tensors='np' , )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_UpperCAmelCase )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
SCREAMING_SNAKE_CASE = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , truncation=_UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='longest' , truncation=_UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='longest' , truncation=_UpperCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='max_length' , truncation=_UpperCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_UpperCAmelCase , truncation=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
def __A ( self ) -> List[Any]:
self._check_padding(numpify=_UpperCAmelCase )
def __A ( self ) -> Optional[Any]:
self._check_padding(numpify=_UpperCAmelCase )
def __A ( self ) -> List[str]:
self._check_truncation(numpify=_UpperCAmelCase )
def __A ( self ) -> Any:
self._check_truncation(numpify=_UpperCAmelCase )
@require_torch
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = self.feat_extract_dict
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE = [len(_UpperCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCAmelCase )
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = self.feat_extract_dict
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE = [len(_UpperCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE = min(_UpperCAmelCase )
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 366 |
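The pad_to_multiple_of behaviour the tests check reduces to rounding a length up to the next multiple, exactly as the expected-length computation inside the tests does:

def round_up(length: int, multiple: int) -> int:
    return length if length % multiple == 0 else (length // multiple + 1) * multiple

assert round_up(37, 10) == 40
assert round_up(40, 10) == 40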
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mgp-str''': 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token='[GO]', bos_token='[GO]', eos_token='[s]', pad_token='[GO]', **kwargs) -> None:
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.vocab)

    def get_vocab(self) -> dict:
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text) -> list:
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token) -> int:
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index) -> str:
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
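
# Standalone round-trip sketch mirroring the character-level logic above (the tiny
# vocab here is illustrative only; a real MgpstrTokenizer loads it from vocab.json):
vocab = {'[GO]': 0, '[s]': 1, 'a': 2, 'b': 3}
decoder = {v: k for k, v in vocab.items()}
tokens = list('ab')                                  # _tokenize splits into chars
ids = [vocab.get(t, vocab['[GO]']) for t in tokens]  # unknown chars fall back to [GO]
assert [decoder[i] for i in ids] == tokens           # decoding round-trips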
| 38 | 0 |
def bead_sort(sequence: list) -> list:
    '''simple docstring'''
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('Sequence must be list of non-negative integers')
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
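    # Extra randomized spot check (illustrative; assumes non-negative ints, as
    # enforced above). Bead sort simulates gravity: each pass balances adjacent
    # rods, so at most len(sequence) passes are needed, and the result must
    # always agree with sorted():
    import random

    data = [random.randint(0, 50) for _ in range(20)]
    assert bead_sort(list(data)) == sorted(data)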
| 329 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):

    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'image_id': 39769, 'annotations': target}
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor()
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' )
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
_UpperCAmelCase = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
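
# Standalone sketch of the aspect-preserving "shortest edge" rule that
# get_expected_values models above (illustrative; the real processor additionally
# caps the longer side at `longest_edge`, which is omitted here):
def shortest_edge_resize(h: int, w: int, shortest: int) -> tuple:
    # Scale so the shorter side lands exactly on `shortest`, keeping aspect ratio.
    if w < h:
        return int(shortest * h / w), shortest
    if w > h:
        return shortest, int(shortest * w / h)
    return shortest, shortest

assert shortest_edge_resize(400, 200, 18) == (36, 18)  # portrait: width is shorter
assert shortest_edge_resize(200, 400, 18) == (18, 36)  # landscape: height is shorter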
| 329 | 1 |
def find_minimum_change(denominations: list, value: str) -> list:
    '''simple docstring'''
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations):
        # Take as many coins of this denomination as possible
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
__A : Any = []
__A : Optional[Any] = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
__A : Tuple = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
__A : Tuple = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
__A : Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__A : Union[str, Any] = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'Following is minimal change for {value}: ')
__A : Optional[Any] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
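
# Caveat (illustrative): the greedy strategy above is only optimal for "canonical"
# coin systems such as the Indian denominations; for arbitrary systems it can
# overshoot. Counterexample:
assert find_minimum_change([1, 3, 4], '6') == [4, 1, 1]  # greedy: 3 coins
# ...whereas [3, 3] makes the same change with only 2 coins (dynamic programming
# would be needed to find that).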
| 360 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    '''simple docstring'''
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['''validation'''].column_names

question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    '''simple docstring'''
    # Strip left whitespace from questions; it is not useful and can break context truncation.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding='max_length', )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples['id'][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]
    return tokenized_examples
eval_examples = raw_datasets['''validation''']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    '''simple docstring'''
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        '''simple docstring'''
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (start logits and end logits)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
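
# Back-of-the-envelope sizing sketch for the buffers allocated above (illustrative;
# uses this script's defaults of batch 8 and sequence length 384, float32 logits):
_batch, _seq, _bytes_per_float32 = 8, 384, 4
_logits_nbytes = _batch * _seq * _bytes_per_float32  # one logits buffer (start or end)
assert _logits_nbytes == 12_288                      # ~12 KiB each, allocated on host and device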
| 323 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : List[str] = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
A__ : List[int] = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
A__ : List[int] = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
A__ : bool = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
A__ : bool = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
A__ : bool = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
A__ : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
A__ : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Benchmark training of model'''} )
A__ : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Verbose memory tracing'''} )
A__ : bool = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
A__ : bool = field(
default=lowerCAmelCase_ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
A__ : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Trace memory line by line'''} )
A__ : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Save result to a CSV file'''} )
A__ : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Save all print statements in a log file'''} )
A__ : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Whether to print environment information'''} )
A__ : bool = field(
default=lowerCAmelCase_ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
A__ : str = field(
default=f"inference_time_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
A__ : str = field(
default=f"inference_memory_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
A__ : str = field(
default=f"train_time_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
A__ : str = field(
default=f"train_memory_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
A__ : str = field(
default=f"env_info_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
A__ : str = field(
default=f"log_{round(time() )}.csv" , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
A__ : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
A__ : bool = field(
default=lowerCAmelCase_ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def snake_case_ ( self : int ):
        warnings.warn(
            F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , FutureWarning , )
def snake_case_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def snake_case_ ( self : int ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def snake_case_ ( self : Optional[Any] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
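
# Note on `list_field` above (illustrative, standalone): dataclasses raise
# ValueError for a plain mutable default like `sizes: List[int] = [8]`, so the
# helper routes the default through `default_factory` instead.
@dataclass
class _ListFieldDemo:
    sizes: List[int] = list_field(default=[8, 32])

assert _ListFieldDemo().sizes == [8, 32]
# (The factory returns the captured list itself, so instances share one object;
# use `default_factory=list` and copy if true per-instance isolation is needed.)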
| 156 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """simple docstring"""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """simple docstring"""

    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """simple docstring"""

    _keys_to_ignore_on_load_unexpected = [r'''pooler''', r'''logit_scale''']
    _keys_to_ignore_on_load_missing = [r'''position_ids''', r'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, '''has_pre_transformation''', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output = outputs['''hidden_states'''][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
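
# Minimal shape sketch of the projection applied above (illustrative: toy sizes,
# random weights; real dimensions come from RobertaSeriesConfig.project_dim):
hidden_size, project_dim, seq_len = 16, 8, 4
last_hidden_state = torch.randn(1, seq_len, hidden_size)
transformation = nn.Linear(hidden_size, project_dim)
projection_state = transformation(last_hidden_state)
assert projection_state.shape == (1, seq_len, project_dim)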
| 156 | 1 |
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    """simple docstring"""

    def __init__(self, k: float, window_size: int):
        # k is the Harris sensitivity factor; only the common 0.04 / 0.06 values are accepted
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("""path_to_image""")
    cv2.imwrite("""detect.png""", color_img)
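
# Quick numeric check of the Harris response R = det(M) - k * trace(M)^2
# (illustrative: a hand-picked structure tensor, not a real image window):
wxx, wyy, wxy = 4.0, 4.0, 0.5  # strong gradients in both directions -> corner-like
r = (wxx * wyy - wxy**2) - 0.04 * (wxx + wyy) ** 2
assert r > 0.5  # would pass the corner threshold used in detect() above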
| 336 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x):
    x = re.sub("""<n>""", """""", x)  # remove pegasus newline char, binding the result
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 336 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] = logging.get_logger()
@dataclass
class Tracker:
    """simple docstring"""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """simple docstring"""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                F'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                F' destination module has {len(dest_traced)}.')
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(F'Transfered from={src_m} to={dest_m}')
class FakeRegNetVisslWrapper(nn.Module):
    """simple docstring"""

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(('''conv1''', model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('''block'''), F'Unexpected layer name {k}'
            block_index = len(feature_blocks) + 1
            feature_blocks.append((F'res{block_index}', v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap(dict):
    """simple docstring"""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split('''-''')
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """simple docstring"""

    def __getitem__(self, x: str):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys) -> str:
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(F'Copied key={from_key} to={to_key}')
    return to_state_dict
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True , ) -> Any:
print(F'Converting {name}...' )
with torch.no_grad():
__lowercase , __lowercase : str = from_model_func()
__lowercase : int = our_model_func(__lowerCAmelCase ).eval()
__lowercase : Optional[Any] = ModuleTransfer(src=__lowerCAmelCase , dest=__lowerCAmelCase , raise_if_mismatch=__lowerCAmelCase )
__lowercase : Dict = torch.randn((1, 3, 224, 224) )
module_transfer(__lowerCAmelCase )
if from_state_dict is not None:
__lowercase : int = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__lowercase : Dict = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
__lowercase : Dict = manually_copy_vissl_head(__lowerCAmelCase , our_model.state_dict() , __lowerCAmelCase )
our_model.load_state_dict(__lowerCAmelCase )
__lowercase : List[Any] = our_model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
__lowercase : int = (
our_outputs.logits if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else our_outputs.last_hidden_state
)
__lowercase : Union[str, Any] = from_model(__lowerCAmelCase )
__lowercase : List[str] = from_output[-1] if type(__lowerCAmelCase ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__lowercase : List[str] = our_outputs.hidden_states[-1]
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=__lowerCAmelCase , )
__lowercase : Tuple = 224 if '''seer''' not in name else 384
# we can use the convnext one
__lowercase : Dict = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=__lowerCAmelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=__lowerCAmelCase , )
print(F'Pushed {name}' )
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True ) -> Optional[int]:
__lowercase : int = '''imagenet-1k-id2label.json'''
__lowercase : int = 1_000
__lowercase : List[str] = (1, num_labels)
__lowercase : Optional[Any] = '''huggingface/label-files'''
__lowercase : Tuple = num_labels
__lowercase : Any = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__lowercase : Union[str, Any] = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
__lowercase : Dict = idalabel
__lowercase : List[Any] = {v: k for k, v in idalabel.items()}
__lowercase : str = partial(__lowerCAmelCase , num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase )
__lowercase : Dict = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url, model_func):
        # `save_directory` is the enclosing conversion function's output-path argument.
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
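    # Example invocation (a sketch; the script filename and output path are
    # assumptions for illustration only):
    #   python convert_regnet_to_pytorch.py --model_name regnet-y-320-seer \
    #       --pytorch_dump_folder_path ./converted-regnet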
| 156 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 156 | 1 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of a vector."""
    return np.dot(vector, vector)
class SVC:
    """Support vector classifier trained by maximizing Wolfe's dual form."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
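    # Minimal usage sketch (not part of the original file): fit the classifier
    # above on a tiny, made-up 1-D dataset and classify one new point. The data
    # values below are assumptions chosen purely for illustration.
    xs = np.array([[0.0], [1.0], [2.0], [3.0]])
    ys = np.array([-1, -1, 1, 1])
    svc = SVC(kernel="linear", regularization=10.0)
    svc.fit(xs, ys)
    print(svc.predict(np.array([2.5])))  # expected: 1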
| 213 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split a passage into one sentence per line (used for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
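if __name__ == "__main__":
    # Usage sketch (illustrative input, not part of the original file): prints
    # "First sentence." and "Second one?" on separate lines.
    print(add_newline_to_end_of_each_sentence("First sentence. Second one?"))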
| 213 | 1 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the image processor class matching a pretrained checkpoint."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
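if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): resolving a
    # processor by checkpoint name. Requires network access to the Hugging Face
    # Hub; the checkpoint below is only an example.
    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    print(type(processor).__name__)  # ConvNextImageProcessor, per the mapping above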
| 321 |
'''simple docstring'''
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
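    # The recursion above encodes the recurrence s(n) = s(n-1)**2 - s(n-1) + 1,
    # factored as (s(n-1) - 1) * s(n-1) + 1. Quick sanity check (added sketch,
    # not in the original file):
    assert [sylvester(n) for n in range(1, 5)] == [2, 3, 7, 43]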
| 79 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 31 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
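    # Worked example (approximate textbook values, assumed here purely for
    # illustration): water at ~20 °C, bulk modulus ≈ 2.15e9 Pa, density ≈ 998 kg/m^3.
    print(speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9))  # ≈ 1468 m/s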
    doctest.testmod()
| 31 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """A list of processors/warpers applied to the scores in order."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Warper that rescales the logits distribution by 1/temperature."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Warper that keeps the smallest token set whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Warper that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the last generated token when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Enforces a minimum length by masking the EOS token until `min_length` is reached."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens while generation is still at its beginning."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Implements the timestamp rules of the Whisper decoder."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
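if __name__ == "__main__":
    # Composition sketch (not part of the original module): chain a temperature
    # warper with a top-k warper and apply them to dummy scores. All shapes and
    # values below are illustrative assumptions.
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)]
    )
    dummy_input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
    dummy_scores = jnp.arange(10, dtype=jnp.float32).reshape(1, 10)
    print(processors(dummy_input_ids, dummy_scores, cur_len=4))  # top-5 kept, rest -inf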
| 268 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
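# To run this suite against real infrastructure one would typically invoke
# pytest with the gating env var set (a sketch; the exact test path is an
# assumption):
#   TEST_SAGEMAKER=True python -m pytest -s -v ./tests/sagemaker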
| 65 | 0 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
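    # Key-exchange sketch (not part of the original file): two parties derive
    # the same shared secret using only the class defined above.
    alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )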
    doctest.testmod()
| 370 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]]  # noqa: E231
        expected_boxes = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]]  # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_UpperCamelCase )
self.assertListEqual(encoding.boxes ,_UpperCamelCase )
# with apply_OCR = False
snake_case_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase )
snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) ) | 8 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( DiffusionPipeline ):
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def __UpperCamelCase( self , A_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
UpperCamelCase : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
self.enable_attention_slicing(A_ )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_6000 , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
'''simple docstring'''
UpperCamelCase : Dict = self.speech_processor.feature_extractor(
A_ , return_tensors="pt" , sampling_rate=A_ ).input_features.to(self.device )
UpperCamelCase : Union[str, Any] = self.speech_model.generate(A_ , max_length=48_0000 )
UpperCamelCase : List[str] = self.speech_processor.tokenizer.batch_decode(A_ , skip_special_tokens=A_ , normalize=A_ )[
0
]
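        # The decoded Whisper transcription above now plays the role of the text
        # prompt: from here on the pipeline behaves like a standard Stable Diffusion
        # text-to-image run.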
if isinstance(A_ , A_ ):
UpperCamelCase : List[Any] = 1
elif isinstance(A_ , A_ ):
UpperCamelCase : Tuple = len(A_ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(A_ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ , A_ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(A_ )}.""" )
# get prompt text embeddings
UpperCamelCase : str = self.tokenizer(
A_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCamelCase : Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase : str = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = text_embeddings.shape
UpperCamelCase : Any = text_embeddings.repeat(1 , A_ , 1 )
UpperCamelCase : str = text_embeddings.view(bs_embed * num_images_per_prompt , A_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
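        # For example, with the default guidance_scale of 7.5 the guided prediction
        # computed later in the denoising loop is: uncond + 7.5 * (text - uncond).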
UpperCamelCase : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : Dict = [""] * batch_size
elif type(A_ ) is not type(A_ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(A_ )} !="""
F""" {type(A_ )}.""" )
elif isinstance(A_ , A_ ):
UpperCamelCase : Any = [negative_prompt]
elif batch_size != len(A_ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(A_ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
UpperCamelCase : int = negative_prompt
UpperCamelCase : str = text_input_ids.shape[-1]
UpperCamelCase : Union[str, Any] = self.tokenizer(
A_ , padding="max_length" , max_length=A_ , truncation=A_ , return_tensors="pt" , )
UpperCamelCase : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : Tuple = uncond_embeddings.shape[1]
UpperCamelCase : List[str] = uncond_embeddings.repeat(1 , A_ , 1 )
UpperCamelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , A_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Optional[Any] = torch.randn(A_ , generator=A_ , device="cpu" , dtype=A_ ).to(
self.device )
else:
UpperCamelCase : Dict = torch.randn(A_ , generator=A_ , device=self.device , dtype=A_ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCamelCase : str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : List[str] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : Optional[Any] = {}
if accepts_eta:
UpperCamelCase : Tuple = eta
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : int = self.scheduler.scale_model_input(A_ , A_ )
# predict the noise residual
UpperCamelCase : str = self.unet(A_ , A_ , encoder_hidden_states=A_ ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : List[Any] = noise_pred.chunk(2 )
UpperCamelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : List[str] = self.scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ , A_ , A_ )
UpperCamelCase : List[str] = 1 / 0.1_82_15 * latents
UpperCamelCase : List[str] = self.vae.decode(A_ ).sample
UpperCamelCase : str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Optional[Any] = self.numpy_to_pil(A_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=A_ , nsfw_content_detected=A_ )
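# Minimal usage sketch (illustrative only -- the checkpoint id, the audio file and
# the librosa-based loading below are assumptions, not taken from this file):
#
#   import librosa
#   pipe = DiffusionPipeline.from_pretrained("<speech-to-image checkpoint>")  # hypothetical id
#   audio, sr = librosa.load("speech.wav", sr=16_000)
#   image = pipe(audio, sampling_rate=sr).images[0]
#   image.save("generated.png")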
| 52 |
import pytest
import requests
from datasets.utils.file_utils import OfflineModeIsEnabled, http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def A_ ( ) -> List[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("GET" , "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def A_ ( ) -> Tuple:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET" , "https://huggingface.co" )
def A_ ( ) -> Optional[int]:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(OfflineModeIsEnabled ):
http_head("https://huggingface.co" )
| 52 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowerCAmelCase_ = None
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase_ = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
lowerCAmelCase_ = '▁'
class _A ( PreTrainedTokenizerFast ):
_UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : str = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : Optional[Any] = BarthezTokenizer
def __init__( self : int , _A : int=None , _A : List[Any]=None , _A : str="<s>" , _A : Dict="</s>" , _A : List[Any]="</s>" , _A : Union[str, Any]="<s>" , _A : List[str]="<unk>" , _A : Dict="<pad>" , _A : int="<mask>" , **_A : str , ) -> Tuple:
"""simple docstring"""
lowercase : Dict = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , **_A , )
lowercase : Union[str, Any] = vocab_file
lowercase : List[str] = False if not self.vocab_file else True
def __a ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[Any] = [self.cls_token_id]
lowercase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
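        # For a sentence pair, the layout produced above is:
        #   <s> A </s> </s> B </s>
        # i.e. a single CLS token, with the two sequences separated by a double SEP.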
def __a ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : List[Any] = [self.sep_token_id]
lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self : str , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Union[str, Any] = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,) | 357 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term( a_i , k , i , n ):
    '''simple docstring'''
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff , dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff , dn , _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c , a_i[j] = divmod(new_c , 10 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff , terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff , terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute( a_i , k , i , n ):
    '''simple docstring'''
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
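    # For example (digits stored little-endian): a_i = [3, 2, 1] represents 123;
    # with k = 1 we get c = 3 (the digit below 10^k) and b = 12, so ds_c = 3 and
    # ds_b = 2 + 1 = 3.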
    start_i = i
    ds_b , ds_c , diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend , a_i[j] = divmod(s , 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add( digits , k , addend ):
    '''simple docstring'''
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient , digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend , digit = divmod(addend , 10 )
        digits.append(digit )
def solution( n = 10**15 ):
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff , terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'''{solution() = }''') | 116 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
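# For example (illustrative launch command, assuming this file is saved as
# gradient_accumulation.py and `accelerate config` has been run beforehand):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4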
_snake_case = 16
_snake_case = 32
def get_dataloaders( accelerator,batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""","""mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""],examples["""sentence2"""],truncation=True,max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,batched=True,remove_columns=["""idx""", """sentence1""", """sentence2"""],)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""","""labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,padding="""longest""",max_length=max_length,pad_to_multiple_of=pad_to_multiple_of,return_tensors="""pt""",)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""],shuffle=True,collate_fn=collate_fn,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""],shuffle=False,collate_fn=collate_fn,batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config,args ):
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""",None ) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu,mixed_precision=args.mixed_precision,gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""","""mrpc""" )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator,batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""",return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(),lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,num_warmup_steps=100,num_training_steps=(len(train_dataloader ) * num_epochs),)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model,optimizer,train_dataloader,eval_dataloader,lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions,references=references,)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''',eval_metric )
def main( ):
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""",type=str,default=None,choices=["""no""", """fp16""", """bf16""", """fp8"""],help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""",)
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""",type=int,default=1,help="""The number of minibatches to be ran before gradients are accumulated.""",)
    parser.add_argument("""--cpu""",action="""store_true""",help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config,args )
if __name__ == "__main__":
main()
| 26 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file( fname ,version ,pattern ):
    with open(fname ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' ,version )
    code = re_pattern.sub(replace ,code )
    with open(fname ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        f.write(code )
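# For example, with pattern "init" a bump to 4.31.0 rewrites
# `__version__ = "4.31.0.dev0"` in src/transformers/__init__.py into
# `__version__ = "4.31.0"` (the version numbers here are illustrative).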
def update_version_in_examples( version ):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder ,fname ) ,version ,pattern='''examples''' )
def global_version_update( version ,patch=False ):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname ,version ,pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list( ):
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
        index += 1
    with open(README_FILE ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        f.writelines(lines )
def get_version( ):
    with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version ,patch=patch )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()
def post_release_work( ):
    current_version = get_version()
    dev_version = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 103 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level( ):
'''simple docstring'''
    env_level_str = os.getenv('''DATASETS_VERBOSITY''', None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
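# For example, running a script with DATASETS_VERBOSITY=error raises the threshold
# to logging.ERROR, while an unrecognized value falls back to the WARNING default above.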
def _get_library_name( ):
    '''simple docstring'''
    return __name__.split('''.''' )[0]
def _get_library_root_logger( ):
    '''simple docstring'''
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger( ):
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger( ):
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger( name : Optional[str] = None ):
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def get_verbosity( ):
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity( UpperCamelCase__ : int ):
'''simple docstring'''
_get_library_root_logger().setLevel(UpperCamelCase__ )
def set_verbosity_info( ):
    '''simple docstring'''
    return set_verbosity(INFO )
def set_verbosity_warning( ):
    '''simple docstring'''
    return set_verbosity(WARNING )
def set_verbosity_debug( ):
    '''simple docstring'''
    return set_verbosity(DEBUG )
def set_verbosity_error( ):
    '''simple docstring'''
    return set_verbosity(ERROR )
def disable_propagation( ):
    '''simple docstring'''
    _get_library_root_logger().propagate = False
def enable_propagation( ):
    '''simple docstring'''
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm :
'''simple docstring'''
    def __init__( self : int , *_a : Any , **_kwargs : int ):  # pylint: disable=unused-argument
        self._iterator = _a[0] if _a else None
    def __iter__( self : Union[str, Any] ):
        return iter(self._iterator )
    def __getattr__( self : Optional[Any] , _a : int ):
        def empty_fn(*_args : List[str] , **_kwargs : Dict ):  # pylint: disable=unused-argument
            return
        return empty_fn
def __enter__( self : Optional[int] ):
return self
def __exit__( self : Any , _a : Tuple , _a : List[Any] , _a : Union[str, Any] ):
return
_tqdm_active = True
class _tqdm_cls :
'''simple docstring'''
    def __call__( self : List[Any] , *_a : Optional[Any] , disable : List[str]=False , **_kwargs : List[Any] ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*_a , **_kwargs )
        else:
            return EmptyTqdm(*_a , **_kwargs )
    def set_lock( self : List[Any] , *_a : Optional[Any] , **_kwargs : Optional[int] ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*_a , **_kwargs )
    def get_lock( self : Any ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def enable_progress_bar( ):
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar( ):
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = False
 | 35 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted :
    '''simple docstring'''
    def __init__( self : List[Any] ):
        self.connections = {}
    def add_node( self : List[Any] , node : str ):
        self.connections[node] = {}
    def add_transition_probability( self : List[Any] , node_a : str , node_b : str , probability : float ):
        if node_a not in self.connections:
            self.add_node(node_a )
        if node_b not in self.connections:
            self.add_node(node_b )
        self.connections[node_a][node_b] = probability
    def get_nodes( self : Optional[Any] ):
        return list(self.connections )
    def transition( self : Tuple , node : str ):
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions( start : str, transitions : list[tuple[str, str, float]], steps : int ):
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node_a, node_b, probability in transitions:
        graph.add_transition_probability(node_a, node_b, probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w( h , w , scale_factor=8 ):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
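# For example, get_new_h_w(768, 768, scale_factor=8) returns (96, 96): each pixel
# dimension is divided by scale_factor**2, rounded up, then multiplied back by
# scale_factor, so the latent dimensions are always a multiple of scale_factor.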
class UpperCamelCase ( DiffusionPipeline ):
    def __init__( self, text_encoder, tokenizer, unet, scheduler, movq, ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if latents is None:
snake_case_ = randn_tensor(lowerCAmelCase__, generator=lowerCAmelCase__, device=lowerCAmelCase__, dtype=lowerCAmelCase__)
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
snake_case_ = latents.to(lowerCAmelCase__)
snake_case_ = latents * scheduler.init_noise_sigma
return latents
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None, ) -> int:
snake_case_ = len(lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else 1
# get prompt text embeddings
snake_case_ = self.tokenizer(
lowerCAmelCase__, padding='max_length', truncation=lowerCAmelCase__, max_length=77, return_attention_mask=lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors='pt', )
snake_case_ = text_inputs.input_ids
snake_case_ = self.tokenizer(lowerCAmelCase__, padding='longest', return_tensors='pt').input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f' {self.tokenizer.model_max_length} tokens: {removed_text}')
snake_case_ = text_input_ids.to(lowerCAmelCase__)
snake_case_ = text_inputs.attention_mask.to(lowerCAmelCase__)
snake_case_ , snake_case_ = self.text_encoder(
input_ids=lowerCAmelCase__, attention_mask=lowerCAmelCase__)
snake_case_ = prompt_embeds.repeat_interleave(lowerCAmelCase__, dim=0)
snake_case_ = text_encoder_hidden_states.repeat_interleave(lowerCAmelCase__, dim=0)
snake_case_ = text_mask.repeat_interleave(lowerCAmelCase__, dim=0)
if do_classifier_free_guidance:
snake_case_ = 42
if negative_prompt is None:
snake_case_ = [''] * batch_size
elif type(lowerCAmelCase__) is not type(lowerCAmelCase__):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase__)} !='
f' {type(lowerCAmelCase__)}.')
elif isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [negative_prompt]
elif batch_size != len(lowerCAmelCase__):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase__)}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
' the batch size of `prompt`.')
else:
snake_case_ = negative_prompt
snake_case_ = self.tokenizer(
lowerCAmelCase__, padding='max_length', max_length=77, truncation=lowerCAmelCase__, return_attention_mask=lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors='pt', )
snake_case_ = uncond_input.input_ids.to(lowerCAmelCase__)
snake_case_ = uncond_input.attention_mask.to(lowerCAmelCase__)
snake_case_ , snake_case_ = self.text_encoder(
input_ids=lowerCAmelCase__, attention_mask=lowerCAmelCase__)
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ = negative_prompt_embeds.shape[1]
snake_case_ = negative_prompt_embeds.repeat(1, lowerCAmelCase__)
snake_case_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt, lowerCAmelCase__)
snake_case_ = uncond_text_encoder_hidden_states.shape[1]
snake_case_ = uncond_text_encoder_hidden_states.repeat(1, lowerCAmelCase__, 1)
snake_case_ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt, lowerCAmelCase__, -1)
snake_case_ = uncond_text_mask.repeat_interleave(lowerCAmelCase__, dim=0)
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ = torch.cat([negative_prompt_embeds, prompt_embeds])
snake_case_ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
snake_case_ = torch.cat([uncond_text_mask, text_mask])
return prompt_embeds, text_encoder_hidden_states, text_mask
def a_ ( self, lowerCAmelCase__=0) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`')
snake_case_ = torch.device(f'cuda:{gpu_id}')
snake_case_ = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__=0) -> int:
if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
snake_case_ = torch.device(f'cuda:{gpu_id}')
if self.device.type != "cpu":
self.to('cpu', silence_dtype_warnings=lowerCAmelCase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case_ = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
snake_case_ , snake_case_ = cpu_offload_with_hook(lowerCAmelCase__, lowerCAmelCase__, prev_module_hook=lowerCAmelCase__)
if self.safety_checker is not None:
snake_case_ , snake_case_ = cpu_offload_with_hook(self.safety_checker, lowerCAmelCase__, prev_module_hook=lowerCAmelCase__)
# We'll offload the last model manually.
snake_case_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a_ ( self) -> Tuple:
if not hasattr(self.unet, '_hf_hook'):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase__, '_hf_hook')
and hasattr(module._hf_hook, 'execution_device')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = 512, lowerCAmelCase__ = 512, lowerCAmelCase__ = 100, lowerCAmelCase__ = 4.0, lowerCAmelCase__ = 1, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = "pil", lowerCAmelCase__ = True, ) -> Union[str, Any]:
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1
elif isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = len(lowerCAmelCase__)
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__)}')
snake_case_ = self._execution_device
snake_case_ = batch_size * num_images_per_prompt
snake_case_ = guidance_scale > 1.0
snake_case_ , snake_case_ , snake_case_ = self._encode_prompt(
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = torch.cat(lowerCAmelCase__, dim=0)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = torch.cat(lowerCAmelCase__, dim=0)
if do_classifier_free_guidance:
snake_case_ = image_embeds.repeat_interleave(lowerCAmelCase__, dim=0)
snake_case_ = negative_image_embeds.repeat_interleave(lowerCAmelCase__, dim=0)
snake_case_ = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
dtype=prompt_embeds.dtype, device=lowerCAmelCase__)
self.scheduler.set_timesteps(lowerCAmelCase__, device=lowerCAmelCase__)
snake_case_ = self.scheduler.timesteps
snake_case_ = self.unet.config.in_channels
snake_case_ , snake_case_ = get_new_h_w(lowerCAmelCase__, lowerCAmelCase__, self.movq_scale_factor)
# create initial latent
snake_case_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, self.scheduler, )
for i, t in enumerate(self.progress_bar(lowerCAmelCase__)):
# expand the latents if we are doing classifier free guidance
snake_case_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
snake_case_ = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
snake_case_ = self.unet(
sample=lowerCAmelCase__, timestep=lowerCAmelCase__, encoder_hidden_states=lowerCAmelCase__, added_cond_kwargs=lowerCAmelCase__, return_dict=lowerCAmelCase__, )[0]
if do_classifier_free_guidance:
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1], dim=1)
snake_case_ , snake_case_ = noise_pred.chunk(2)
snake_case_ , snake_case_ = variance_pred.chunk(2)
snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case_ = torch.cat([noise_pred, variance_pred_text], dim=1)
if not (
hasattr(self.scheduler.config, 'variance_type')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1], dim=1)
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, generator=lowerCAmelCase__, ).prev_sample
# post-processing
snake_case_ = self.movq.decode(lowerCAmelCase__, force_not_quantize=lowerCAmelCase__)['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')
if output_type in ["np", "pil"]:
snake_case_ = image * 0.5 + 0.5
snake_case_ = image.clamp(0, 1)
snake_case_ = image.cpu().permute(0, 2, 3, 1).float().numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(lowerCAmelCase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__)
| 69 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__( self: str , model: List[Any]=None , **kwargs: str ):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""" , None )
        self.latest_model_name = kwargs.get("""latest_model_name""" , ONNX_WEIGHTS_NAME )
    def __call__( self: Dict , **kwargs: Any ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
@staticmethod
    def load_model( path: Union[str, Path] , provider=None , sess_options=None ):
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
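    # For example, load_model("model.onnx", provider="CUDAExecutionProvider") runs the
    # session on GPU; when no provider is given it falls back to CPUExecutionProvider,
    # as the branch above shows.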
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCamelCase = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCamelCase = self.model_save_dir.joinpath(UpperCamelCase_ )
if src_path.exists():
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, os.PathLike] , **UpperCamelCase_: Optional[Any] , ):
if os.path.isfile(UpperCamelCase_ ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
# saving model weights/files
self._save_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[Union[bool, str, None]] = None , UpperCamelCase_: Optional[Union[str, None]] = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional["ort.SessionOptions"] = None , **UpperCamelCase_: int , ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCamelCase_ ):
__lowerCamelCase = OnnxRuntimeModel.load_model(
os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
__lowerCamelCase = Path(UpperCamelCase_ )
# load model from hub
else:
# download model
__lowerCamelCase = hf_hub_download(
repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , )
__lowerCamelCase = Path(UpperCamelCase_ ).parent
__lowerCamelCase = Path(UpperCamelCase_ ).name
__lowerCamelCase = OnnxRuntimeModel.load_model(UpperCamelCase_ , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
return cls(model=UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: int , ):
__lowerCamelCase = None
if len(str(UpperCamelCase_ ).split("""@""" ) ) == 2:
__lowerCamelCase, __lowerCamelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , **UpperCamelCase_ , )
| 12 | 0 |
"""simple docstring"""
def sylvester( number ):
    assert isinstance(number, int ), f"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
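# The first few terms produced by the recurrence above are 2, 3, 7, 43, 1807:
# each term equals t * (t - 1) + 1, where t is the previous term.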
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 23 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count( message ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x ):
    return x[0]
def get_frequency_order( message ):
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message ):
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
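# The score above ranges from 0 to 12. When brute-forcing a cipher, the candidate
# decryption with the highest score is the one whose letter frequencies look most
# like English.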
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
    """simple docstring"""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
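# Note on the reshapes above: trax stores each attention projection as a per-head
# (num_heads, hidden_size, head_dim) array, while the torch linear layers expect a
# single flattened weight matrix, hence the transpose/view before each set_param call.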
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
    """simple docstring"""
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0] )
    layer_norm_a_bias = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_a_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_a_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the trax model pickle file.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 70 |
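# The repeated ``.transpose(1, 2).contiguous().view(-1, hidden_size)`` pattern above
# flattens trax's per-head attention weights into the 2-D matrix an nn.Linear expects.
# A shape-only sketch with invented toy dimensions (not taken from a real checkpoint):
import torch

num_heads, hidden_size, head_dim = 2, 8, 4
trax_qk = torch.randn(num_heads, hidden_size, head_dim)  # trax per-head layout

flat = trax_qk.transpose(1, 2).contiguous().view(-1, hidden_size)
assert flat.shape == (num_heads * head_dim, hidden_size)  # nn.Linear weight: (out, in)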
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
| 70 | 1 |
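# A minimal round trip showing what ``_generate_tables`` above does per file: a
# pickled DataFrame is read back and converted to an Arrow table. The file name
# is arbitrary and chosen for illustration only.
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
df.to_pickle("/tmp/example.pkl")

with open("/tmp/example.pkl", "rb") as f:
    table = pa.Table.from_pandas(pd.read_pickle(f))
print(table.schema)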
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
lowercase = emb.weight.data
return lin_layer
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
lowercase = Namespace(**checkpoint['''cfg''']['''model'''] )
lowercase = checkpoint['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowercase = state_dict['''decoder.embed_tokens.weight'''].shape[0]
lowercase = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
lowercase = XGLMConfig(
vocab_size=lowerCAmelCase__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
lowercase = XGLMForCausalLM(lowerCAmelCase__ )
lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
print(lowerCAmelCase__ )
lowercase = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
args = parser.parse_args()
model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 97 |
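# ``make_linear_from_emb`` above ties the LM head to the token-embedding matrix.
# A standalone sketch of the same weight-tying trick with toy sizes; this variant
# uses the shape-consistent Linear(in_features, out_features) form.
import torch
from torch import nn

emb = nn.Embedding(num_embeddings=10, embedding_dim=4)
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight.data = emb.weight.data  # both weights are (10, 4)

hidden = emb(torch.tensor([1, 2]))  # (2, 4)
assert lm_head(hidden).shape == (2, 10)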
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97 | 1 |
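# The _LazyModule indirection above defers heavy imports until an attribute is first
# accessed. A minimal sketch of the same idea using a PEP 562 module-level
# __getattr__ (an illustration only, not the transformers implementation):
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}


def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")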
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
| 60 |
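# Why the search above can stop below 1,000,000: a d-digit number is at least
# 10**(d - 1), while its digit fifth-power sum is at most d * 9**5; already for
# d = 7 the maximum possible sum is smaller than the smallest 7-digit number.
assert 7 * 9**5 == 413_343 < 10**6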
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 356 | '''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # compare two tensor protos while ignoring their names
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("""unexpected data type: """, dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("""total reduced size: """, total_reduced_size / 1024 / 1024 / 1024, """GB""")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = """optimized_""" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
| 21 | 0 |
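# The magic numbers in the size bookkeeping above are onnx.TensorProto dtype codes
# (1 = FLOAT, 6 = INT32, 7 = INT64, 11 = DOUBLE). A table-driven sketch of the same
# per-initializer accounting (an illustration, not the script's own code):
import numpy as np
from onnx import TensorProto

DTYPE_BYTES = {TensorProto.FLOAT: 4, TensorProto.INT32: 4, TensorProto.INT64: 8, TensorProto.DOUBLE: 8}


def init_mem_size(init) -> int:
    return int(np.prod(init.dims)) * DTYPE_BYTES.get(init.data_type, 0)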
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2000, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 167 |
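# A hypothetical usage sketch for the pipeline above; the checkpoint name is assumed
# (a public score-SDE checkpoint) and full 2000-step sampling is slow without a GPU.
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = pipe(num_inference_steps=2000).images[0]
image.save("sde_ve_sample.png")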
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 316 | 0 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage='https://github.com/google-research/bleurt', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                } ), codebase_urls=['https://github.com/google-research/bleurt'], reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'], )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
                'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').')
            checkpoint_name = 'bleurt-base-128'
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""")
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores} | 357 |
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image() | 96 | 0 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant in [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''')

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 336 |
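# The corner response computed inside the double loop above is
# r = det(M) - k * trace(M)**2, with M the windowed structure tensor.
# A toy evaluation with invented numbers:
wxx, wyy, wxy, k = 4.0, 3.0, 1.0, 0.04
det = wxx * wyy - wxy**2   # 11.0
trace = wxx + wyy          # 7.0
r = det - k * trace**2     # 11.0 - 0.04 * 49.0 = 9.04
print(r > 0.5)             # True -> this window would be flagged as a corner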
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = 'roformer'

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
| 330 |
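# A quick usage sketch for the config above (assumes the transformers package is
# installed; the values are arbitrary and the model is randomly initialised):
from transformers import RoFormerConfig, RoFormerModel

config = RoFormerConfig(vocab_size=50000, hidden_size=768, rotary_value=False)
model = RoFormerModel(config)
print(model.config.model_type)  # "roformer"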
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock())


@pytest.mark.parametrize(
    '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='''https://huggingface.co/docs/evaluate'''):
        func(*args)
func(*snake_case_ )
| 330 | 1 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # index of char in the pattern, scanning from the right; -1 if absent
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # rightmost mismatch position in the text for this alignment; -1 if full match
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
__SCREAMING_SNAKE_CASE ="ABAABA"
__SCREAMING_SNAKE_CASE ="AB"
__SCREAMING_SNAKE_CASE =BoyerMooreSearch(text, pattern)
__SCREAMING_SNAKE_CASE =bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 213 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 213 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs, ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        # extract MFSC features for one (unbatched) waveform
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel='log', )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| 334 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 334 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        """
        This will be superseded by a framework-agnostic approach soon.
        """

        features: List[InputFeatures]
    def __init__( self , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : Optional[int] = None , overwrite_cache : bool = False , evaluate : bool = False , ):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir , "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(F"""Loading features from cached file {cached_features_file}""" )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(F"""Creating features from dataset file at {data_dir}""" )
                examples = (
                    processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                )
                logger.info("Training examples: %s" , len(examples ) )
                self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                logger.info("Saving features into cached file %s" , cached_features_file )
                torch.save(self.features , cached_features_file )
def __len__( self : Tuple ):
return len(self.features )
    def __getitem__( self , i ):
        return self.features[i]
    def get_labels( self ):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    '''simple docstring'''
    features: List[InputFeatures]
    def __init__( self , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : Optional[int] = 128 , overwrite_cache : bool = False , evaluate : bool = False , ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(A )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
            gen , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
    def get_dataset( self ):
        return self.dataset
    def __len__( self ):
        return len(self.features )
    def __getitem__( self , i ):
        return self.features[i]
    def get_labels( self ):
        return self.label_list
class HansProcessor(DataProcessor ):
'''simple docstring'''
    def get_train_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )
    def get_dev_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )
    def get_labels( self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex" ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ) -> List[InputFeatures]:
    """simple docstring"""
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(F"""guid: {example}""" )
        logger.info(F"""features: {features[i]}""" )
    return features
hans_tasks_num_labels = {
    """hans""": 3,
}
hans_processors = {
    """hans""": HansProcessor,
}
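# Hedged illustration of the label-index swap performed above for
# RoBERTa/BART-style checkpoints: MNLI-pretrained heads order the classes
# differently, so positions 1 and 2 are exchanged before features are built.
swap_demo = ["contradiction", "entailment", "neutral"]
swap_demo[1], swap_demo[2] = swap_demo[2], swap_demo[1]
print(swap_demo)  # ['contradiction', 'neutral', 'entailment']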
| 31 | '''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps : int = 1000 , trained_betas : Optional[Union[np.ndarray, List[float]]] = None ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , return_dict : bool = True , ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self , sample : torch.FloatTensor , *args , **kwargs ):
        return sample
    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self : Union[str, Any] ):
return self.config.num_train_timesteps
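# Hedged numeric sanity check (illustrative only, not part of the scheduler):
# the fourth-order Adams-Bashforth combination used in `step` above,
# (55*e1 - 59*e2 + 37*e3 - 9*e4) / 24, is exact for cubic integrands, so a
# plain-Python check against the true integral of t**3 reproduces it.
h = 0.1
f = lambda t: t ** 3
history = [0.4, 0.3, 0.2, 0.1]  # newest first, mirroring self.ets[-1] ... self.ets[-4]
ab4_step = h * (55 * f(history[0]) - 59 * f(history[1]) + 37 * f(history[2]) - 9 * f(history[3])) / 24
exact_step = (0.5 ** 4 - 0.4 ** 4) / 4  # integral of t**3 from 0.4 to 0.5
print(ab4_step, exact_step)  # both 0.009225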
| 31 | 1 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig( PretrainedConfig ):
    model_type = "efficientformer"
    def __init__( self , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , downsamples : List[bool] = [True, True, True, True] , dim : int = 4_4_8 , key_dim : int = 3_2 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 1_6 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1e-5 , hidden_act : str = "gelu" , initializer_range : float = 0.02 , layer_norm_eps : float = 1e-1_2 , image_size : int = 2_2_4 , batch_norm_eps : float = 1e-0_5 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
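# Hedged usage sketch of the config above; the values printed simply echo
# the defaults reconstructed in this file.
config = EfficientFormerConfig()
print(config.num_meta3d_blocks, config.hidden_sizes)  # 1 [48, 96, 224, 448]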
| 356 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config( PretrainedConfig ):
    model_type = "layoutlmv3"
    def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1_0_2_4 , coordinate_size=1_2_8 , shape_size=1_2_8 , has_relative_attention_bias=True , rel_pos_bins=3_2 , max_rel_pos=1_2_8 , rel_2d_pos_bins=6_4 , max_rel_2d_pos=2_5_6 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=2_2_4 , num_channels=3 , patch_size=1_6 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.12" )
    @property
    def inputs( self ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1e-5
@property
    def default_onnx_opset( self ):
'''simple docstring'''
return 1_2
    def generate_dummy_inputs( self , processor : "ProcessorMixin" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 4_0 , image_height : int = 4_0 , ):
        '''simple docstring'''
        setattr(processor.image_processor , '''apply_ocr''' , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
| 172 | 0 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
A: Dict = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
A: Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
A: List[Any] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        '''simple docstring'''
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 109 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
    def _set_value( self , key , data ):
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self ):
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self , key , data=None ):
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data ) | 8 | 0 |
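# Hedged mini-demo, independent of the class above, of the separate-chaining
# idea it implements: collisions at one slot are absorbed into a deque.
from collections import deque

demo_table = [None] * 3
def chain_insert(key, value):
    slot = key % len(demo_table)
    if demo_table[slot] is None:
        demo_table[slot] = deque()
    demo_table[slot].appendleft(value)

chain_insert(1, "a")
chain_insert(4, "b")  # 4 % 3 == 1, so it chains onto the same slot
print(demo_table)  # [None, deque(['b', 'a']), None]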
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    train_file: Optional[str] = field(default=None , metadata={"""help""": """The input training data file (a text file)."""} )
    validation_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. If passed, sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            """help""": (
                """Whether to pad all samples to the maximum sentence length. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
                """efficient on GPU but very bad for TPU."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    def __post_init__( self ) -> None:
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """simple docstring"""
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self ,features ):
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["""input_ids"""] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,)
        # Un-flatten
        batch = {k: v.view(batch_size ,num_choices ,-1 ) for k, v in batch.items()}
        # Add back labels
        batch["""labels"""] = torch.tensor(labels ,dtype=torch.int64 )
        return batch
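# Hedged shape check of the flatten / un-flatten trick in __call__ above:
# 4 choices per example are padded as one long batch, then viewed back to
# (batch, choices, seq_len). Pure-tensor sketch, no tokenizer required;
# the underscore-prefixed names are illustrative only.
_flat_demo = torch.zeros(2 * 4, 16)        # 2 examples x 4 choices, seq_len 16
print(_flat_demo.view(2, 4, -1).shape)     # torch.Size([2, 4, 16])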
def main() -> None:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Any = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
datasets.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_UpperCAmelCase : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_UpperCAmelCase : str = {}
if data_args.train_file is not None:
_UpperCAmelCase : List[Any] = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase : Dict = data_args.validation_file
_UpperCAmelCase : List[str] = data_args.train_file.split(""".""" )[-1]
_UpperCAmelCase : Dict = load_dataset(
lowerCAmelCase_ , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_UpperCAmelCase : Any = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase : Tuple = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_UpperCAmelCase : str = [F'''ending{i}''' for i in range(4 )]
_UpperCAmelCase : List[Any] = """sent1"""
_UpperCAmelCase : Tuple = """sent2"""
if data_args.max_seq_length is None:
_UpperCAmelCase : Optional[int] = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
_UpperCAmelCase : Tuple = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_UpperCAmelCase : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCAmelCase_ ):
_UpperCAmelCase : int = [[context] * 4 for context in examples[context_name]]
_UpperCAmelCase : str = examples[question_header_name]
_UpperCAmelCase : Optional[int] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCAmelCase_ )
]
# Flatten out
_UpperCAmelCase : List[Any] = list(chain(*lowerCAmelCase_ ) )
_UpperCAmelCase : Union[str, Any] = list(chain(*lowerCAmelCase_ ) )
# Tokenize
_UpperCAmelCase : int = tokenizer(
lowerCAmelCase_ , lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCAmelCase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
_UpperCAmelCase : List[Any] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
_UpperCAmelCase : Union[str, Any] = min(len(lowerCAmelCase_ ) , data_args.max_train_samples )
_UpperCAmelCase : Optional[int] = train_dataset.select(range(lowerCAmelCase_ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
_UpperCAmelCase : Dict = train_dataset.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
_UpperCAmelCase : List[Any] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Dict = min(len(lowerCAmelCase_ ) , data_args.max_eval_samples )
_UpperCAmelCase : Tuple = eval_dataset.select(range(lowerCAmelCase_ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
_UpperCAmelCase : Optional[int] = eval_dataset.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_UpperCAmelCase : Tuple = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(lowerCAmelCase_ ):
_UpperCAmelCase : str = eval_predictions
_UpperCAmelCase : Optional[int] = np.argmax(lowerCAmelCase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_UpperCAmelCase : str = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , compute_metrics=lowerCAmelCase_ , )
# Training
if training_args.do_train:
_UpperCAmelCase : Dict = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : Dict = last_checkpoint
_UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase : Dict = train_result.metrics
_UpperCAmelCase : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ )
)
_UpperCAmelCase : Optional[int] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
trainer.log_metrics("""train""" , lowerCAmelCase_ )
trainer.save_metrics("""train""" , lowerCAmelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCAmelCase : List[Any] = trainer.evaluate()
_UpperCAmelCase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
trainer.log_metrics("""eval""" , lowerCAmelCase_ )
trainer.save_metrics("""eval""" , lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
def _mp_fn(index ) -> None:
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 355 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
torch.manual_seed(0 )
        model = UNet2DConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
    def dummy_vae( self ):
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
    def dummy_text_encoder( self ):
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
        return CLIPTextModel(config )
@property
    def dummy_extractor( self ):
        def extract(*args ,**kwargs ):
            class Out:
                """simple docstring"""
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )
                def to( self ,device ):
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
return extract
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> int:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 349 | 0 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock( nn.Module ):
    def __init__( self , dim : int , num_attention_heads : int , attention_head_dim : int , dropout=0.0 , cross_attention_dim : Optional[int] = None , activation_fn : str = "geglu" , num_embeds_ada_norm : Optional[int] = None , attention_bias : bool = False , only_cross_attention : bool = False , double_self_attention : bool = False , upcast_attention : bool = False , norm_elementwise_affine : bool = True , norm_type : str = "layer_norm" , final_dropout : bool = False , ):
        '''simple docstring'''
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
                f' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.')
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim , num_embeds_ada_norm )
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim , num_embeds_ada_norm )
        else:
            self.norm1 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.attn1 = Attention(
            query_dim=dim , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=upcast_attention , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim , num_embeds_ada_norm )
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
            )
            self.attn2 = Attention(
                query_dim=dim , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , upcast_attention=upcast_attention , ) # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.ff = FeedForward(dim , dropout=dropout , activation_fn=activation_fn , final_dropout=final_dropout )
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward( self , chunk_size , dim ):
        '''simple docstring'''
        self._chunk_size = chunk_size
        self._chunk_dim = dim
def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]:
'''simple docstring'''
if self.use_ada_layer_norm:
a__: Optional[int] = self.norma(lowerCamelCase__ , lowerCamelCase__)
elif self.use_ada_layer_norm_zero:
a__: Tuple = self.norma(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hidden_dtype=hidden_states.dtype)
else:
a__: Tuple = self.norma(lowerCamelCase__)
a__: Dict = cross_attention_kwargs if cross_attention_kwargs is not None else {}
a__: str = self.attna(
lowerCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
if self.use_ada_layer_norm_zero:
a__: List[Any] = gate_msa.unsqueeze(1) * attn_output
a__: str = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
a__: Optional[int] = (
self.norma(lowerCamelCase__ , lowerCamelCase__) if self.use_ada_layer_norm else self.norma(lowerCamelCase__)
)
a__: Optional[Any] = self.attna(
lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
a__: Dict = attn_output + hidden_states
# 3. Feed-forward
a__: str = self.norma(lowerCamelCase__)
if self.use_ada_layer_norm_zero:
a__: Tuple = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.')
a__: Any = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
a__: Optional[Any] = torch.cat(
[self.ff(lowerCamelCase__) for hid_slice in norm_hidden_states.chunk(lowerCamelCase__ , dim=self._chunk_dim)] , dim=self._chunk_dim , )
else:
a__: Dict = self.ff(lowerCamelCase__)
if self.use_ada_layer_norm_zero:
a__: Optional[Any] = gate_mlp.unsqueeze(1) * ff_output
a__: Optional[int] = ff_output + hidden_states
return hidden_states
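# Usage sketch (our addition, not part of the original module): feed-forward
# chunking above splits the sequence axis so only one chunk's activations are
# live at a time. The constructor arguments mirror the upstream diffusers
# signature (dim, num_attention_heads, attention_head_dim) and are assumptions
# here, since the class definition sits above this excerpt.
def _example_feed_forward_chunking():
    block = BasicTransformerBlock(dim=64, num_attention_heads=2, attention_head_dim=32)
    block.set_chunk_feed_forward(chunk_size=8, dim=1)  # chunk along the sequence axis
    return block(torch.randn(2, 32, 64))  # seq length 32 is divisible by chunk_size 8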
class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False, ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
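# Quick shape check (our addition): the default GEGLU feed-forward maps
# (batch, seq, dim) back to the same width; the sizes below are arbitrary.
def _example_feed_forward():
    ff = FeedForward(dim=64, dropout=0.1)
    return ff(torch.randn(2, 16, 64)).shape  # torch.Size([2, 16, 64])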
class GELU(nn.Module):
    r"""GELU activation with an input projection and an optional tanh approximation."""

    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A variant of the gated linear unit activation, https://arxiv.org/abs/2002.05202."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""Sigmoid approximation of the GELU: x * sigmoid(1.702 * x), https://arxiv.org/abs/1606.08415."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
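# Sanity check (our addition): the sigmoid form tracks the exact GELU closely;
# at x = 1.0 it gives ~0.846 versus ~0.841 for F.gelu, and the gap stays below
# roughly 2e-2 on [-3, 3]. A throwaway comparison, not part of the original module.
def _example_gelu_approximation():
    x = torch.linspace(-3.0, 3.0, steps=121)
    return (x * torch.sigmoid(1.702 * x) - F.gelu(x)).abs().max()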
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero): predicts shift/scale/gate terms from timestep and class embeddings."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
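# Usage sketch (our addition): AdaGroupNorm modulates a group-normalized feature
# map with a per-channel scale/shift predicted from an embedding. Shapes are
# illustrative: 8 channels split into 4 groups, modulated from a 32-dim embedding.
def _example_ada_group_norm():
    norm = AdaGroupNorm(embedding_dim=32, out_dim=8, num_groups=4)
    x, emb = torch.randn(2, 8, 16, 16), torch.randn(2, 32)
    return norm(x, emb).shape  # torch.Size([2, 8, 16, 16])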
| 290 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None, ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name, )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
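# Schedule sketch (our addition): WarmUp ramps linearly (power=1.0) up to the
# peak rate, then defers to the wrapped decay schedule, shifted by warmup_steps.
# The concrete numbers below are illustrative only.
def _example_warmup_schedule():
    decay = tf.keras.optimizers.schedules.PolynomialDecay(5e-5, decay_steps=900, end_learning_rate=0.0)
    schedule = WarmUp(initial_learning_rate=5e-5, decay_schedule_fn=decay, warmup_steps=100)
    return [float(schedule(step)) for step in (10, 100, 500)]  # rising, peak, decaying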
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None, ):
    """Creates an optimizer with a learning rate schedule: an optional warmup phase followed by polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
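# Usage sketch (our addition): a typical fine-tuning configuration; the step
# counts and rates are placeholders, not recommendations.
def _example_create_optimizer():
    optimizer, lr_schedule = create_optimizer(
        init_lr=3e-5, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01)
    return optimizer, lr_schedule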
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied to the variables before each parameter update."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs, ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking, )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over multiple mini-batches before an optimizer step."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
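# Training-loop sketch (our addition): accumulate over `k` mini-batches, then
# apply the averaged gradients and reset. All names below are illustrative.
def _example_gradient_accumulation(model, optimizer, batches, k=4):
    accumulator = GradientAccumulator()
    for i, (x, y) in enumerate(batches, start=1):
        with tf.GradientTape() as tape:
            loss = tf.reduce_mean(tf.keras.losses.mse(y, model(x)))
        accumulator(tape.gradient(loss, model.trainable_variables))
        if i % k == 0:
            grads = [g / tf.cast(accumulator.step, g.dtype) for g in accumulator.gradients]
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            accumulator.reset()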
| 116 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = PegasusTokenizer
__SCREAMING_SNAKE_CASE = PegasusTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def snake_case_ ( self , **_snake_case ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def snake_case_ ( self , _snake_case ) -> Any:
"""simple docstring"""
return ("This is a test", "This is a test")
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''</s>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(_snake_case ) , 1103 )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=_snake_case , add_special_tokens=_snake_case ).input_ids[0]
UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=_snake_case , add_special_tokens=_snake_case ).input_ids[0]
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
UpperCAmelCase = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=_snake_case ).input_ids[0]
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase = '''To ensure a smooth flow of bank resolutions.'''
UpperCAmelCase = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=_snake_case ).input_ids[0]
self.assertListEqual(_snake_case , _snake_case )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = ['''This is going to be way too long.''' * 150, '''short example''']
UpperCAmelCase = ['''not super long but more than 5 tokens''', '''tiny''']
UpperCAmelCase = self._large_tokenizer(_snake_case , padding=_snake_case , truncation=_snake_case , return_tensors='''pt''' )
UpperCAmelCase = self._large_tokenizer(
text_target=_snake_case , max_length=5 , padding=_snake_case , truncation=_snake_case , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_snake_case ) == 2 # input_ids, attention_mask.
@slow
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
# fmt: off
UpperCAmelCase = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = PegasusTokenizer
__SCREAMING_SNAKE_CASE = PegasusTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
def snake_case_ ( self ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def snake_case_ ( self , **_snake_case ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def snake_case_ ( self , _snake_case ) -> List[str]:
"""simple docstring"""
return ("This is a test", "This is a test")
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=_snake_case , add_special_tokens=_snake_case ).input_ids[0]
UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=_snake_case , add_special_tokens=_snake_case ).input_ids[0]
self.assertListEqual(_snake_case , _snake_case )
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = ['''This is going to be way too long.''' * 1000, '''short example''']
UpperCAmelCase = ['''not super long but more than 5 tokens''', '''tiny''']
UpperCAmelCase = self._large_tokenizer(_snake_case , padding=_snake_case , truncation=_snake_case , return_tensors='''pt''' )
UpperCAmelCase = self._large_tokenizer(
text_target=_snake_case , max_length=5 , padding=_snake_case , truncation=_snake_case , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_snake_case ) == 2 # input_ids, attention_mask.
def snake_case_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
UpperCAmelCase = self._large_tokenizer(_snake_case ).input_ids
self.assertListEqual(
_snake_case , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 152 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__a = "3.0.12"
__a = None
def logger() -> logging.Logger:
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """Context-manager proxy returned by :meth:`BaseFileLock.acquire`."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 35 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead.")
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all model tester classes (names ending with `ModelTester`) in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to the test classes that cover them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to their model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read: keep strings, replace classes by their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
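# Worked example (our addition): the path-to-module helper is pure string logic
# and can be checked without importing any test module.
def _example_get_module_path():
    path = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    assert get_module_path(path) == "tests.models.bert.test_modeling_bert"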
| 35 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')

mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'

if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = MarianTokenizer
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
    def setUp(self):
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab'])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'])
        if not (save_dir / VOCAB_FILES_NAMES['source_spm']).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['source_spm'])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['target_spm'])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase ( self,**__lowerCamelCase ):
return MarianTokenizer.from_pretrained(self.tmpdirname,**lowerCAmelCase__ )
def UpperCamelCase ( self,__lowerCamelCase ):
return (
"This is a test",
"This is a test",
)
def UpperCamelCase ( self ):
A__ = "</s>"
A__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ),lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ),lowerCAmelCase__ )
def UpperCamelCase ( self ):
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],'''</s>''' )
self.assertEqual(vocab_keys[1],'''<unk>''' )
self.assertEqual(vocab_keys[-1],'''<pad>''' )
self.assertEqual(len(lowerCAmelCase__ ),9 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size,9 )
def UpperCamelCase ( self ):
A__ = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
A__ = en_de_tokenizer(['''I am a small frog'''],return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__,lowerCAmelCase__ )
A__ = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(lowerCAmelCase__,batch.input_ids[0] )
A__ = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCAmelCase__ )
A__ = [x.name for x in Path(lowerCAmelCase__ ).glob('''*''' )]
self.assertIn('''source.spm''',lowerCAmelCase__ )
MarianTokenizer.from_pretrained(lowerCAmelCase__ )
def UpperCamelCase ( self ):
A__ = self.get_tokenizer()
A__ = tok(
['''I am a small frog''' * 1000, '''I am a small frog'''],padding=lowerCAmelCase__,truncation=lowerCAmelCase__,return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__,lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape,(2, 512) )
def UpperCamelCase ( self ):
A__ = self.get_tokenizer()
A__ = tok(['''I am a tiny frog''', '''I am a small frog'''],padding=lowerCAmelCase__,return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__,lowerCAmelCase__ )
self.assertEqual(batch_smaller.input_ids.shape,(2, 10) )
@slow
def UpperCamelCase ( self ):
A__ = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__,model_name='''Helsinki-NLP/opus-mt-en-de''',revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''',decode_kwargs={'''use_source_tokenizer''': True},)
def UpperCamelCase ( self ):
A__ = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
A__ = "Tämä on testi"
A__ = "This is a test"
A__ = [76, 7, 2047, 2]
A__ = [69, 12, 11, 940, 2]
A__ = tokenizer(lowerCAmelCase__ ).input_ids
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
A__ = tokenizer(text_target=lowerCAmelCase__ ).input_ids
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
A__ = tokenizer.decode(lowerCAmelCase__,skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__,lowerCAmelCase__ )
| 367 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
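# Worked example (our addition): the keyword cipher is a simple substitution,
# so enciphering and then deciphering with the same map is the identity (upcased).
def _example_round_trip() -> bool:
    demo_map = create_cipher_map("Marvin")
    return decipher(encipher("Hello World", demo_map), demo_map) == "HELLO WORLD"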
| 39 | 0 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends

if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""Constructs a MarkupLM feature extractor: extracts text nodes and their xpaths from HTML strings."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")
        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}.")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
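# Usage sketch (our addition, requires bs4 at runtime): every text node comes
# back with an absolute xpath; repeated sibling tags get 1-based subscripts.
def _example_markup_feature_extraction():
    feature_extractor = MarkupLMFeatureExtractor()
    encoding = feature_extractor("<html><body><p>Hello</p><p>World</p></body></html>")
    # encoding["nodes"]  -> [['Hello', 'World']]
    # encoding["xpaths"] -> [['/html/body/p[1]', '/html/body/p[2]']]
    return encoding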
| 23 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide and conquer: find the maximum of `nums` within indices [left, right]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
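# Worked example (our addition): each call halves the index range, so the
# recursion depth is O(log n) while the total work stays O(n).
def _example_find_max() -> None:
    assert find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7) == 9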
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 23 | 1 |
"""simple docstring"""
from typing import Any
def viterbi(observations_space: list, states_space: list, initial_probabilities: dict, transition_probabilities: dict, emission_probabilities: dict, ) -> list:
    """Return the most likely sequence of hidden states for the given observations."""
    _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
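    # Worked example (our addition): the classic healthy/fever HMM; with these
    # numbers the most likely hidden path is Healthy -> Healthy -> Fever.
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
    emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
    print(viterbi(observations, states, start_p, trans_p, emit_p))  # ['Healthy', 'Healthy', 'Fever']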
| 27 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
for i in range(0 ,9 ):
UpperCamelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(-8 ,1 ) )
def A_ ( ):
'''simple docstring'''
UpperCamelCase : int = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"""dlrow olleH""",
7,
5_5_5_5,
0,
-192.55555,
"""Hello, world!""",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
UpperCamelCase : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(snake_case_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(snake_case_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
UpperCamelCase : Dict = linked_list.delete_head()
assert result == -9
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
UpperCamelCase : int = linked_list.delete_tail()
assert result == 12.2
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
UpperCamelCase : Optional[Any] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("""Hello again, world!""" ) )
assert (
str(snake_case_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(snake_case_ )
assert (
str(snake_case_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(snake_case_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def A_ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
UpperCamelCase : List[Any] = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(snake_case_ )
print("""\nReading/changing Node data using indexing:""" )
print(f'Element at Position 1: {linked_list[1]}' )
UpperCamelCase : List[Any] = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(snake_case_ )
print(f'length of linked_list is : {len(snake_case_ )}' )
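
# Side note (illustrative, not part of the original module): because Python
# evaluates the right-hand side of a tuple assignment before binding targets
# left to right, the three-pointer shuffle in LinkedList.reverse() can be
# written in a single line. A minimal sketch on bare nodes:
def _reverse_chain_sketch(head: Node) -> Node:
    prev = None
    while head:
        # same as: next_node = head.next; head.next = prev; prev = head; head = next_node
        head.next, prev, head = prev, head, head.next
    return prev


_chain = Node(1)
_chain.next = Node(2)
_rev = _reverse_chain_sketch(_chain)
assert (_rev.data, _rev.next.data) == (2, 1)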
if __name__ == "__main__":
    main()
| 27 | 1 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return the Catalan number sequence from 0 through `upper_limit`.

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    >>> catalan_numbers(2)
    [1, 1, 2]
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
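
# Cross-check sketch (added for illustration): the sequence above also has the
# closed form C(n) = comb(2n, n) / (n + 1); math.comb requires Python 3.8+.
def _catalan_closed_form(n: int) -> int:
    from math import comb

    return comb(2 * n, n) // (n + 1)


assert [_catalan_closed_form(i) for i in range(6)] == catalan_numbers(5)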
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
| 97 |
"""CamemBERT configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
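
# Usage sketch (illustrative, not part of this module): the transformers
# package exposes the class above as CamembertConfig, so a smaller variant
# can be configured and instantiated with, e.g.:
#
#     from transformers import CamembertConfig, CamembertModel
#
#     config = CamembertConfig(num_hidden_layers=6)
#     model = CamembertModel(config)  # randomly initialised weights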
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 97 | 1 |
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
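
# Usage sketch (illustrative): this class backs the "feature-extraction"
# pipeline task; the checkpoint name below is an assumption for the demo.
#
#     from transformers import pipeline
#
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("This is a test.")  # [batch][token][hidden_size] lists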
| 356 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    # Create a simple DataLoader to use during the test cases
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
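
# Single-process sanity sketch (added for illustration, not one of the
# two-process tests above): with one process there is nothing to pad, so the
# final short batch of a five-element dataset survives intact.
def demo_single_process_batches():
    accelerator = Accelerator()
    dataset = TensorDataset(torch.arange(5, dtype=torch.float32))
    dl = accelerator.prepare(DataLoader(dataset, batch_size=2))
    return [len(batch[0]) for batch in dl]  # expected [2, 2, 1] when world size is 1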
if __name__ == "__main__":
    main()
| 297 | 0 |